diff --git a/packages/evm/src/precompiles/bls12_381/nbl/src/_shortw_utils.ts b/packages/evm/src/precompiles/bls12_381/nbl/src/_shortw_utils.ts new file mode 100644 index 00000000000..d5bc2717d58 --- /dev/null +++ b/packages/evm/src/precompiles/bls12_381/nbl/src/_shortw_utils.ts @@ -0,0 +1,30 @@ +/** + * Utilities for short weierstrass curves, combined with noble-hashes. + * @module + */ +/*! noble-curves - MIT License (c) 2022 Paul Miller (paulmillr.com) */ +import { hmac } from '@noble/hashes/hmac' +import { concatBytes, randomBytes } from '@noble/hashes/utils' +import type { CHash } from './abstract/utils.ts' +import { type CurveFn, type CurveType, weierstrass } from './abstract/weierstrass.ts' + +/** connects noble-curves to noble-hashes */ +export function getHash(hash: CHash): { + hash: CHash + hmac: (key: Uint8Array, ...msgs: Uint8Array[]) => Uint8Array + randomBytes: typeof randomBytes +} { + return { + hash, + hmac: (key: Uint8Array, ...msgs: Uint8Array[]) => hmac(hash, key, concatBytes(...msgs)), + randomBytes, + } +} +/** Same API as @noble/hashes, with ability to create curve with custom hash */ +export type CurveDef = Readonly> +export type CurveFnWithCreate = CurveFn & { create: (hash: CHash) => CurveFn } + +export function createCurve(curveDef: CurveDef, defHash: CHash): CurveFnWithCreate { + const create = (hash: CHash): CurveFn => weierstrass({ ...curveDef, ...getHash(hash) }) + return { ...create(defHash), create } +} diff --git a/packages/evm/src/precompiles/bls12_381/nbl/src/abstract/bls.ts b/packages/evm/src/precompiles/bls12_381/nbl/src/abstract/bls.ts new file mode 100644 index 00000000000..37e8eead962 --- /dev/null +++ b/packages/evm/src/precompiles/bls12_381/nbl/src/abstract/bls.ts @@ -0,0 +1,573 @@ +/** + * BLS (Barreto-Lynn-Scott) family of pairing-friendly curves. + * BLS != BLS. + * The file implements BLS (Boneh-Lynn-Shacham) signatures. + * Used in both BLS (Barreto-Lynn-Scott) and BN (Barreto-Naehrig) + * families of pairing-friendly curves. + * Consists of two curves: G1 and G2: + * - G1 is a subgroup of (x, y) E(Fq) over y² = x³ + 4. + * - G2 is a subgroup of ((x₁, x₂+i), (y₁, y₂+i)) E(Fq²) over y² = x³ + 4(1 + i) where i is √-1 + * - Gt, created by bilinear (ate) pairing e(G1, G2), consists of p-th roots of unity in + * Fq^k where k is embedding degree. Only degree 12 is currently supported, 24 is not. + * Pairing is used to aggregate and verify signatures. + * There are two main ways to use it: + * 1. Fp for short private keys, Fp₂ for signatures + * 2. Fp for short signatures, Fp₂ for private keys + * @module + **/ +/*! noble-curves - MIT License (c) 2022 Paul Miller (paulmillr.com) */ +// TODO: import { AffinePoint } from './curve.ts'; +import { + type H2CPointConstructor, + type Opts as HTFOpts, + type MapToCurve, + createHasher, + type htfBasicOpts, +} from './hash-to-curve.ts' +import { type IField, getMinHashLength, mapHashToField } from './modular.ts' +import type { Fp2, Fp2Bls, Fp6, Fp12, Fp12Bls } from './tower.ts' +import { type CHash, type Hex, type PrivKey, ensureBytes, memoized } from './utils.ts' +import { + type CurvePointsRes, + type CurvePointsType, + type ProjPointType, + weierstrassPoints, +} from './weierstrass.ts' + +type Fp = bigint // Can be different field? 
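+
+// A minimal usage sketch of the CurveFn returned by `bls()` below. Names such as
+// `blsCurve` and `msg` are illustrative only; a concrete instance (e.g. BLS12-381) is assumed:
+//   const priv = blsCurve.utils.randomPrivateKey();
+//   const pub = blsCurve.getPublicKey(priv);  // compressed G1 point (48 bytes on BLS12-381)
+//   const sig = blsCurve.sign(msg, priv);     // S = priv x H(msg), a G2 point
+//   blsCurve.verify(sig, msg, pub);           // checks e(pub, H(msg)) == e(G, S)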
+ +// prettier-ignore +const _0n = BigInt(0), + _1n = BigInt(1), + _2n = BigInt(2), + _3n = BigInt(3) + +export type TwistType = 'multiplicative' | 'divisive' + +export type ShortSignatureCoder = { + fromHex(hex: Hex): ProjPointType + toRawBytes(point: ProjPointType): Uint8Array + toHex(point: ProjPointType): string +} + +export type SignatureCoder = { + fromHex(hex: Hex): ProjPointType + toRawBytes(point: ProjPointType): Uint8Array + toHex(point: ProjPointType): string +} + +export type PostPrecomputePointAddFn = ( + Rx: Fp2, + Ry: Fp2, + Rz: Fp2, + Qx: Fp2, + Qy: Fp2, +) => { Rx: Fp2; Ry: Fp2; Rz: Fp2 } +export type PostPrecomputeFn = ( + Rx: Fp2, + Ry: Fp2, + Rz: Fp2, + Qx: Fp2, + Qy: Fp2, + pointAdd: PostPrecomputePointAddFn, +) => void +export type CurveType = { + G1: Omit, 'n'> & { + ShortSignature: SignatureCoder + mapToCurve: MapToCurve + htfDefaults: HTFOpts + } + G2: Omit, 'n'> & { + Signature: SignatureCoder + mapToCurve: MapToCurve + htfDefaults: HTFOpts + } + fields: { + Fp: IField + Fr: IField + Fp2: Fp2Bls + Fp6: IField + Fp12: Fp12Bls + } + params: { + // NOTE: MSB is always ignored and used as marker for length, + // otherwise leading zeros will be lost. + // Can be different from 'X' (seed) param! + ateLoopSize: bigint + xNegative: boolean + r: bigint + twistType: TwistType // BLS12-381: Multiplicative, BN254: Divisive + } + htfDefaults: HTFOpts + hash: CHash // Because we need outputLen for DRBG + randomBytes: (bytesLength?: number) => Uint8Array + // This is super ugly hack for untwist point in BN254 after miller loop + postPrecompute?: PostPrecomputeFn +} + +type PrecomputeSingle = [Fp2, Fp2, Fp2][] +type Precompute = PrecomputeSingle[] + +export type CurveFn = { + getPublicKey: (privateKey: PrivKey) => Uint8Array + getPublicKeyForShortSignatures: (privateKey: PrivKey) => Uint8Array + sign: { + (message: Hex, privateKey: PrivKey, htfOpts?: htfBasicOpts): Uint8Array + (message: ProjPointType, privateKey: PrivKey, htfOpts?: htfBasicOpts): ProjPointType + } + signShortSignature: { + (message: Hex, privateKey: PrivKey, htfOpts?: htfBasicOpts): Uint8Array + (message: ProjPointType, privateKey: PrivKey, htfOpts?: htfBasicOpts): ProjPointType + } + verify: ( + signature: Hex | ProjPointType, + message: Hex | ProjPointType, + publicKey: Hex | ProjPointType, + htfOpts?: htfBasicOpts, + ) => boolean + verifyShortSignature: ( + signature: Hex | ProjPointType, + message: Hex | ProjPointType, + publicKey: Hex | ProjPointType, + htfOpts?: htfBasicOpts, + ) => boolean + verifyBatch: ( + signature: Hex | ProjPointType, + messages: (Hex | ProjPointType)[], + publicKeys: (Hex | ProjPointType)[], + htfOpts?: htfBasicOpts, + ) => boolean + aggregatePublicKeys: { + (publicKeys: Hex[]): Uint8Array + (publicKeys: ProjPointType[]): ProjPointType + } + aggregateSignatures: { + (signatures: Hex[]): Uint8Array + (signatures: ProjPointType[]): ProjPointType + } + aggregateShortSignatures: { + (signatures: Hex[]): Uint8Array + (signatures: ProjPointType[]): ProjPointType + } + millerLoopBatch: (pairs: [Precompute, Fp, Fp][]) => Fp12 + pairing: (P: ProjPointType, Q: ProjPointType, withFinalExponent?: boolean) => Fp12 + pairingBatch: ( + pairs: { g1: ProjPointType; g2: ProjPointType }[], + withFinalExponent?: boolean, + ) => Fp12 + G1: CurvePointsRes & ReturnType> + G2: CurvePointsRes & ReturnType> + Signature: SignatureCoder + ShortSignature: ShortSignatureCoder + params: { + ateLoopSize: bigint + r: bigint + G1b: bigint + G2b: Fp2 + } + fields: { + Fp: IField + Fp2: Fp2Bls + Fp6: IField + Fp12: 
Fp12Bls + Fr: IField + } + utils: { + randomPrivateKey: () => Uint8Array + calcPairingPrecomputes: (p: ProjPointType) => Precompute + } +} + +// Not used with BLS12-381 (no sequential `11` in X). Useful for other curves. +function NAfDecomposition(a: bigint) { + const res = [] + // a>1 because of marker bit + for (; a > _1n; a >>= _1n) { + if ((a & _1n) === _0n) res.unshift(0) + else if ((a & _3n) === _3n) { + res.unshift(-1) + a += _1n + } else res.unshift(1) + } + return res +} + +export function bls(CURVE: CurveType): CurveFn { + // Fields are specific for curve, so for now we'll need to pass them with opts + const { Fp, Fr, Fp2, Fp6, Fp12 } = CURVE.fields + const BLS_X_IS_NEGATIVE = CURVE.params.xNegative + const TWIST: TwistType = CURVE.params.twistType + // Point on G1 curve: (x, y) + const G1_ = weierstrassPoints({ n: Fr.ORDER, ...CURVE.G1 }) + const G1 = Object.assign( + G1_, + createHasher(G1_.ProjectivePoint, CURVE.G1.mapToCurve, { + ...CURVE.htfDefaults, + ...CURVE.G1.htfDefaults, + }), + ) + // Point on G2 curve (complex numbers): (x₁, x₂+i), (y₁, y₂+i) + const G2_ = weierstrassPoints({ n: Fr.ORDER, ...CURVE.G2 }) + const G2 = Object.assign( + G2_, + createHasher(G2_.ProjectivePoint as H2CPointConstructor, CURVE.G2.mapToCurve, { + ...CURVE.htfDefaults, + ...CURVE.G2.htfDefaults, + }), + ) + type G1 = typeof G1.ProjectivePoint.BASE + type G2 = typeof G2.ProjectivePoint.BASE + + // Applies sparse multiplication as line function + let lineFunction: (c0: Fp2, c1: Fp2, c2: Fp2, f: Fp12, Px: Fp, Py: Fp) => Fp12 + if (TWIST === 'multiplicative') { + lineFunction = (c0: Fp2, c1: Fp2, c2: Fp2, f: Fp12, Px: Fp, Py: Fp) => + Fp12.mul014(f, c0, Fp2.mul(c1, Px), Fp2.mul(c2, Py)) + } else if (TWIST === 'divisive') { + // NOTE: it should be [c0, c1, c2], but we use different order here to reduce complexity of + // precompute calculations. 
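+    // mul014 / mul034 multiply f by a sparse Fp12 element whose only nonzero coefficients sit at
+    // the named positions; swapping the argument order here (instead of the coefficients) lets both
+    // twist types share the same precompute code.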
+ lineFunction = (c0: Fp2, c1: Fp2, c2: Fp2, f: Fp12, Px: Fp, Py: Fp) => + Fp12.mul034(f, Fp2.mul(c2, Py), Fp2.mul(c1, Px), c0) + } else throw new Error('bls: unknown twist type') + + const Fp2div2 = Fp2.div(Fp2.ONE, Fp2.mul(Fp2.ONE, _2n)) + function pointDouble(ell: PrecomputeSingle, Rx: Fp2, Ry: Fp2, Rz: Fp2) { + const t0 = Fp2.sqr(Ry) // Ry² + const t1 = Fp2.sqr(Rz) // Rz² + const t2 = Fp2.mulByB(Fp2.mul(t1, _3n)) // 3 * T1 * B + const t3 = Fp2.mul(t2, _3n) // 3 * T2 + const t4 = Fp2.sub(Fp2.sub(Fp2.sqr(Fp2.add(Ry, Rz)), t1), t0) // (Ry + Rz)² - T1 - T0 + const c0 = Fp2.sub(t2, t0) // T2 - T0 (i) + const c1 = Fp2.mul(Fp2.sqr(Rx), _3n) // 3 * Rx² + const c2 = Fp2.neg(t4) // -T4 (-h) + + ell.push([c0, c1, c2]) + + Rx = Fp2.mul(Fp2.mul(Fp2.mul(Fp2.sub(t0, t3), Rx), Ry), Fp2div2) // ((T0 - T3) * Rx * Ry) / 2 + Ry = Fp2.sub(Fp2.sqr(Fp2.mul(Fp2.add(t0, t3), Fp2div2)), Fp2.mul(Fp2.sqr(t2), _3n)) // ((T0 + T3) / 2)² - 3 * T2² + Rz = Fp2.mul(t0, t4) // T0 * T4 + return { Rx, Ry, Rz } + } + function pointAdd(ell: PrecomputeSingle, Rx: Fp2, Ry: Fp2, Rz: Fp2, Qx: Fp2, Qy: Fp2) { + // Addition + const t0 = Fp2.sub(Ry, Fp2.mul(Qy, Rz)) // Ry - Qy * Rz + const t1 = Fp2.sub(Rx, Fp2.mul(Qx, Rz)) // Rx - Qx * Rz + const c0 = Fp2.sub(Fp2.mul(t0, Qx), Fp2.mul(t1, Qy)) // T0 * Qx - T1 * Qy == Ry * Qx - Rx * Qy + const c1 = Fp2.neg(t0) // -T0 == Qy * Rz - Ry + const c2 = t1 // == Rx - Qx * Rz + + ell.push([c0, c1, c2]) + + const t2 = Fp2.sqr(t1) // T1² + const t3 = Fp2.mul(t2, t1) // T2 * T1 + const t4 = Fp2.mul(t2, Rx) // T2 * Rx + const t5 = Fp2.add(Fp2.sub(t3, Fp2.mul(t4, _2n)), Fp2.mul(Fp2.sqr(t0), Rz)) // T3 - 2 * T4 + T0² * Rz + Rx = Fp2.mul(t1, t5) // T1 * T5 + Ry = Fp2.sub(Fp2.mul(Fp2.sub(t4, t5), t0), Fp2.mul(t3, Ry)) // (T4 - T5) * T0 - T3 * Ry + Rz = Fp2.mul(Rz, t3) // Rz * T3 + return { Rx, Ry, Rz } + } + + // Pre-compute coefficients for sparse multiplication + // Point addition and point double calculations is reused for coefficients + // pointAdd happens only if bit set, so wNAF is reasonable. Unfortunately we cannot combine + // add + double in windowed precomputes here, otherwise it would be single op (since X is static) + const ATE_NAF = NAfDecomposition(CURVE.params.ateLoopSize) + + const calcPairingPrecomputes = memoized((point: G2) => { + const p = point + const { x, y } = p.toAffine() + // prettier-ignore + const Qx = x, + Qy = y, + negQy = Fp2.neg(y) + // prettier-ignore + let Rx = Qx, + Ry = Qy, + Rz = Fp2.ONE + const ell: Precompute = [] + for (const bit of ATE_NAF) { + const cur: PrecomputeSingle = [] + ;({ Rx, Ry, Rz } = pointDouble(cur, Rx, Ry, Rz)) + if (bit) ({ Rx, Ry, Rz } = pointAdd(cur, Rx, Ry, Rz, Qx, bit === -1 ? negQy : Qy)) + ell.push(cur) + } + if (CURVE.postPrecompute) { + const last = ell[ell.length - 1] + CURVE.postPrecompute(Rx, Ry, Rz, Qx, Qy, pointAdd.bind(null, last)) + } + return ell + }) + + // Main pairing logic is here. 
Computes product of miller loops + final exponentiate + // Applies calculated precomputes + type MillerInput = [Precompute, Fp, Fp][] + function millerLoopBatch(pairs: MillerInput, withFinalExponent: boolean = false) { + let f12 = Fp12.ONE + if (pairs.length) { + const ellLen = pairs[0][0].length + for (let i = 0; i < ellLen; i++) { + f12 = Fp12.sqr(f12) // This allows us to do sqr only one time for all pairings + // NOTE: we apply multiple pairings in parallel here + for (const [ell, Px, Py] of pairs) { + for (const [c0, c1, c2] of ell[i]) f12 = lineFunction(c0, c1, c2, f12, Px, Py) + } + } + } + if (BLS_X_IS_NEGATIVE) f12 = Fp12.conjugate(f12) + return withFinalExponent ? Fp12.finalExponentiate(f12) : f12 + } + type PairingInput = { g1: G1; g2: G2 } + // Calculates product of multiple pairings + // This up to x2 faster than just `map(({g1, g2})=>pairing({g1,g2}))` + function pairingBatch(pairs: PairingInput[], withFinalExponent: boolean = true) { + const res: MillerInput = [] + // This cache precomputed toAffine for all points + G1.ProjectivePoint.normalizeZ(pairs.map(({ g1 }) => g1)) + G2.ProjectivePoint.normalizeZ(pairs.map(({ g2 }) => g2)) + for (const { g1, g2 } of pairs) { + if (g1.equals(G1.ProjectivePoint.ZERO) || g2.equals(G2.ProjectivePoint.ZERO)) + throw new Error('pairing is not available for ZERO point') + // This uses toAffine inside + g1.assertValidity() + g2.assertValidity() + const Qa = g1.toAffine() + res.push([calcPairingPrecomputes(g2), Qa.x, Qa.y]) + } + return millerLoopBatch(res, withFinalExponent) + } + // Calculates bilinear pairing + function pairing(Q: G1, P: G2, withFinalExponent: boolean = true): Fp12 { + return pairingBatch([{ g1: Q, g2: P }], withFinalExponent) + } + + const utils = { + randomPrivateKey: (): Uint8Array => { + const length = getMinHashLength(Fr.ORDER) + return mapHashToField(CURVE.randomBytes(length), Fr.ORDER) + }, + calcPairingPrecomputes, + } + + const { ShortSignature } = CURVE.G1 + const { Signature } = CURVE.G2 + + type G1Hex = Hex | G1 + type G2Hex = Hex | G2 + function normP1(point: G1Hex): G1 { + return point instanceof G1.ProjectivePoint ? (point as G1) : G1.ProjectivePoint.fromHex(point) + } + function normP1Hash(point: G1Hex, htfOpts?: htfBasicOpts): G1 { + return point instanceof G1.ProjectivePoint + ? point + : (G1.hashToCurve(ensureBytes('point', point), htfOpts) as G1) + } + function normP2(point: G2Hex): G2 { + return point instanceof G2.ProjectivePoint ? point : Signature.fromHex(point) + } + function normP2Hash(point: G2Hex, htfOpts?: htfBasicOpts): G2 { + return point instanceof G2.ProjectivePoint + ? point + : (G2.hashToCurve(ensureBytes('point', point), htfOpts) as G2) + } + + // Multiplies generator (G1) by private key. + // P = pk x G + function getPublicKey(privateKey: PrivKey): Uint8Array { + return G1.ProjectivePoint.fromPrivateKey(privateKey).toRawBytes(true) + } + + // Multiplies generator (G2) by private key. + // P = pk x G + function getPublicKeyForShortSignatures(privateKey: PrivKey): Uint8Array { + return G2.ProjectivePoint.fromPrivateKey(privateKey).toRawBytes(true) + } + + // Executes `hashToCurve` on the message and then multiplies the result by private key. 
+ // S = pk x H(m) + function sign(message: Hex, privateKey: PrivKey, htfOpts?: htfBasicOpts): Uint8Array + function sign(message: G2, privateKey: PrivKey, htfOpts?: htfBasicOpts): G2 + function sign(message: G2Hex, privateKey: PrivKey, htfOpts?: htfBasicOpts): Uint8Array | G2 { + const msgPoint = normP2Hash(message, htfOpts) + msgPoint.assertValidity() + const sigPoint = msgPoint.multiply(G1.normPrivateKeyToScalar(privateKey)) + if (message instanceof G2.ProjectivePoint) return sigPoint + return Signature.toRawBytes(sigPoint) + } + + function signShortSignature(message: Hex, privateKey: PrivKey, htfOpts?: htfBasicOpts): Uint8Array + function signShortSignature(message: G1, privateKey: PrivKey, htfOpts?: htfBasicOpts): G1 + function signShortSignature( + message: G1Hex, + privateKey: PrivKey, + htfOpts?: htfBasicOpts, + ): Uint8Array | G1 { + const msgPoint = normP1Hash(message, htfOpts) + msgPoint.assertValidity() + const sigPoint = msgPoint.multiply(G1.normPrivateKeyToScalar(privateKey)) + if (message instanceof G1.ProjectivePoint) return sigPoint + return ShortSignature.toRawBytes(sigPoint) + } + + // Checks if pairing of public key & hash is equal to pairing of generator & signature. + // e(P, H(m)) == e(G, S) + function verify( + signature: G2Hex, + message: G2Hex, + publicKey: G1Hex, + htfOpts?: htfBasicOpts, + ): boolean { + const P = normP1(publicKey) + const Hm = normP2Hash(message, htfOpts) + const G = G1.ProjectivePoint.BASE + const S = normP2(signature) + const exp = pairingBatch([ + { g1: P.negate(), g2: Hm }, // ePHM = pairing(P.negate(), Hm, false); + { g1: G, g2: S }, // eGS = pairing(G, S, false); + ]) + return Fp12.eql(exp, Fp12.ONE) + } + + // Checks if pairing of public key & hash is equal to pairing of generator & signature. + // e(S, G) == e(H(m), P) + function verifyShortSignature( + signature: G1Hex, + message: G1Hex, + publicKey: G2Hex, + htfOpts?: htfBasicOpts, + ): boolean { + const P = normP2(publicKey) + const Hm = normP1Hash(message, htfOpts) + const G = G2.ProjectivePoint.BASE + const S = normP1(signature) + const exp = pairingBatch([ + { g1: Hm, g2: P }, // eHmP = pairing(Hm, P, false); + { g1: S, g2: G.negate() }, // eSG = pairing(S, G.negate(), false); + ]) + return Fp12.eql(exp, Fp12.ONE) + } + + function aNonEmpty(arr: any[]) { + if (!Array.isArray(arr) || arr.length === 0) throw new Error('expected non-empty array') + } + + // Adds a bunch of public key points together. + // pk1 + pk2 + pk3 = pkA + function aggregatePublicKeys(publicKeys: Hex[]): Uint8Array + function aggregatePublicKeys(publicKeys: G1[]): G1 + function aggregatePublicKeys(publicKeys: G1Hex[]): Uint8Array | G1 { + aNonEmpty(publicKeys) + const agg = publicKeys.map(normP1).reduce((sum, p) => sum.add(p), G1.ProjectivePoint.ZERO) + const aggAffine = agg //.toAffine(); + if (publicKeys[0] instanceof G1.ProjectivePoint) { + aggAffine.assertValidity() + return aggAffine + } + // toRawBytes ensures point validity + return aggAffine.toRawBytes(true) + } + + // Adds a bunch of signature points together. 
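+  // sig1 + sig2 + sig3 = sigA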
+ function aggregateSignatures(signatures: Hex[]): Uint8Array + function aggregateSignatures(signatures: G2[]): G2 + function aggregateSignatures(signatures: G2Hex[]): Uint8Array | G2 { + aNonEmpty(signatures) + const agg = signatures.map(normP2).reduce((sum, s) => sum.add(s), G2.ProjectivePoint.ZERO) + const aggAffine = agg //.toAffine(); + if (signatures[0] instanceof G2.ProjectivePoint) { + aggAffine.assertValidity() + return aggAffine + } + return Signature.toRawBytes(aggAffine) + } + + // Adds a bunch of signature points together. + function aggregateShortSignatures(signatures: Hex[]): Uint8Array + function aggregateShortSignatures(signatures: G1[]): G1 + function aggregateShortSignatures(signatures: G1Hex[]): Uint8Array | G1 { + aNonEmpty(signatures) + const agg = signatures.map(normP1).reduce((sum, s) => sum.add(s), G1.ProjectivePoint.ZERO) + const aggAffine = agg //.toAffine(); + if (signatures[0] instanceof G1.ProjectivePoint) { + aggAffine.assertValidity() + return aggAffine + } + return ShortSignature.toRawBytes(aggAffine) + } + + // https://ethresear.ch/t/fast-verification-of-multiple-bls-signatures/5407 + // e(G, S) = e(G, SUM(n)(Si)) = MUL(n)(e(G, Si)) + function verifyBatch( + signature: G2Hex, + // TODO: maybe `{message: G2Hex, publicKey: G1Hex}[]` instead? + messages: G2Hex[], + publicKeys: G1Hex[], + htfOpts?: htfBasicOpts, + ): boolean { + aNonEmpty(messages) + if (publicKeys.length !== messages.length) + throw new Error('amount of public keys and messages should be equal') + const sig = normP2(signature) + const nMessages = messages.map((i) => normP2Hash(i, htfOpts)) + const nPublicKeys = publicKeys.map(normP1) + // NOTE: this works only for exact same object + const messagePubKeyMap = new Map() + for (let i = 0; i < nPublicKeys.length; i++) { + const pub = nPublicKeys[i] + const msg = nMessages[i] + let keys = messagePubKeyMap.get(msg) + if (keys === undefined) { + keys = [] + messagePubKeyMap.set(msg, keys) + } + keys.push(pub) + } + const paired = [] + try { + for (const [msg, keys] of messagePubKeyMap) { + const groupPublicKey = keys.reduce((acc, msg) => acc.add(msg)) + paired.push({ g1: groupPublicKey, g2: msg }) + } + paired.push({ g1: G1.ProjectivePoint.BASE.negate(), g2: sig }) + return Fp12.eql(pairingBatch(paired), Fp12.ONE) + } catch { + return false + } + } + + G1.ProjectivePoint.BASE._setWindowSize(4) + + return { + getPublicKey, + getPublicKeyForShortSignatures, + sign, + signShortSignature, + verify, + verifyBatch, + verifyShortSignature, + aggregatePublicKeys, + aggregateSignatures, + aggregateShortSignatures, + millerLoopBatch, + pairing, + pairingBatch, + G1, + G2, + Signature, + ShortSignature, + fields: { + Fr, + Fp, + Fp2, + Fp6, + Fp12, + }, + params: { + ateLoopSize: CURVE.params.ateLoopSize, + r: CURVE.params.r, + G1b: CURVE.G1.b, + G2b: CURVE.G2.b, + }, + utils, + } +} diff --git a/packages/evm/src/precompiles/bls12_381/nbl/src/abstract/curve.ts b/packages/evm/src/precompiles/bls12_381/nbl/src/abstract/curve.ts new file mode 100644 index 00000000000..6a7830a1970 --- /dev/null +++ b/packages/evm/src/precompiles/bls12_381/nbl/src/abstract/curve.ts @@ -0,0 +1,466 @@ +/** + * Methods for elliptic curve multiplication by scalars. + * Contains wNAF, pippenger + * @module + */ +/*! 
noble-curves - MIT License (c) 2022 Paul Miller (paulmillr.com) */ +import { type IField, nLength, validateField } from './modular.ts' +import { bitLen, bitMask, validateObject } from './utils.ts' + +const _0n = BigInt(0) +const _1n = BigInt(1) + +export type AffinePoint = { + x: T + y: T +} & { z?: never; t?: never } + +export interface Group> { + double(): T + negate(): T + add(other: T): T + subtract(other: T): T + equals(other: T): boolean + multiply(scalar: bigint): T +} + +export type GroupConstructor = { + BASE: T + ZERO: T +} +export type Mapper = (i: T[]) => T[] + +function constTimeNegate>(condition: boolean, item: T): T { + const neg = item.negate() + return condition ? neg : item +} + +function validateW(W: number, bits: number) { + if (!Number.isSafeInteger(W) || W <= 0 || W > bits) + throw new Error('invalid window size, expected [1..' + bits + '], got W=' + W) +} + +/** Internal wNAF opts for specific W and scalarBits */ +export type WOpts = { + windows: number + windowSize: number + mask: bigint + maxNumber: number + shiftBy: bigint +} + +function calcWOpts(W: number, scalarBits: number): WOpts { + validateW(W, scalarBits) + const windows = Math.ceil(scalarBits / W) + 1 // W=8 33. Not 32, because we skip zero + const windowSize = 2 ** (W - 1) // W=8 128. Not 256, because we skip zero + const maxNumber = 2 ** W // W=8 256 + const mask = bitMask(W) // W=8 255 == mask 0b11111111 + const shiftBy = BigInt(W) // W=8 8 + return { windows, windowSize, mask, maxNumber, shiftBy } +} + +function calcOffsets(n: bigint, window: number, wOpts: WOpts) { + const { windowSize, mask, maxNumber, shiftBy } = wOpts + let wbits = Number(n & mask) // extract W bits. + let nextN = n >> shiftBy // shift number by W bits. + + // What actually happens here: + // const highestBit = Number(mask ^ (mask >> 1n)); + // let wbits2 = wbits - 1; // skip zero + // if (wbits2 & highestBit) { wbits2 ^= Number(mask); // (~); + + // split if bits > max: +224 => 256-32 + if (wbits > windowSize) { + // we skip zero, which means instead of `>= size-1`, we do `> size` + wbits -= maxNumber // -32, can be maxNumber - wbits, but then we need to set isNeg here. + nextN += _1n // +256 (carry) + } + const offsetStart = window * windowSize + const offset = offsetStart + Math.abs(wbits) - 1 // -1 because we skip zero + const isZero = wbits === 0 // is current window slice a 0? + const isNeg = wbits < 0 // is current window slice negative? + const isNegF = window % 2 !== 0 // fake random statement for noise + const offsetF = offsetStart // fake offset for noise + return { nextN, offset, isZero, isNeg, isNegF, offsetF } +} + +function validateMSMPoints(points: any[], c: any) { + if (!Array.isArray(points)) throw new Error('array expected') + points.forEach((p, i) => { + if (!(p instanceof c)) throw new Error('invalid point at index ' + i) + }) +} +function validateMSMScalars(scalars: any[], field: any) { + if (!Array.isArray(scalars)) throw new Error('array of scalars expected') + scalars.forEach((s, i) => { + if (!field.isValid(s)) throw new Error('invalid scalar at index ' + i) + }) +} + +// Since points in different groups cannot be equal (different object constructor), +// we can have single place to store precomputes. +// Allows to make points frozen / immutable. 
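+// WeakMap is used so a point's cached window table can be garbage-collected together with the
+// point itself once nothing else references it.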
+const pointPrecomputes = new WeakMap() +const pointWindowSizes = new WeakMap() + +function getW(P: any): number { + return pointWindowSizes.get(P) || 1 +} + +export type IWNAF> = { + constTimeNegate: >(condition: boolean, item: T) => T + hasPrecomputes(elm: T): boolean + unsafeLadder(elm: T, n: bigint, p?: T): T + precomputeWindow(elm: T, W: number): Group[] + getPrecomputes(W: number, P: T, transform: Mapper): T[] + wNAF(W: number, precomputes: T[], n: bigint): { p: T; f: T } + wNAFUnsafe(W: number, precomputes: T[], n: bigint, acc?: T): T + wNAFCached(P: T, n: bigint, transform: Mapper): { p: T; f: T } + wNAFCachedUnsafe(P: T, n: bigint, transform: Mapper, prev?: T): T + setWindowSize(P: T, W: number): void +} + +/** + * Elliptic curve multiplication of Point by scalar. Fragile. + * Scalars should always be less than curve order: this should be checked inside of a curve itself. + * Creates precomputation tables for fast multiplication: + * - private scalar is split by fixed size windows of W bits + * - every window point is collected from window's table & added to accumulator + * - since windows are different, same point inside tables won't be accessed more than once per calc + * - each multiplication is 'Math.ceil(CURVE_ORDER / 𝑊) + 1' point additions (fixed for any scalar) + * - +1 window is neccessary for wNAF + * - wNAF reduces table size: 2x less memory + 2x faster generation, but 10% slower multiplication + * + * @todo Research returning 2d JS array of windows, instead of a single window. + * This would allow windows to be in different memory locations + */ +export function wNAF>(c: GroupConstructor, bits: number): IWNAF { + return { + constTimeNegate, + + hasPrecomputes(elm: T) { + return getW(elm) !== 1 + }, + + // non-const time multiplication ladder + unsafeLadder(elm: T, n: bigint, p = c.ZERO) { + let d: T = elm + while (n > _0n) { + if (n & _1n) p = p.add(d) + d = d.double() + n >>= _1n + } + return p + }, + + /** + * Creates a wNAF precomputation window. Used for caching. + * Default window size is set by `utils.precompute()` and is equal to 8. + * Number of precomputed points depends on the curve size: + * 2^(𝑊−1) * (Math.ceil(𝑛 / 𝑊) + 1), where: + * - 𝑊 is the window size + * - 𝑛 is the bitlength of the curve order. + * For a 256-bit curve and window size 8, the number of precomputed points is 128 * 33 = 4224. + * @param elm Point instance + * @param W window size + * @returns precomputed point tables flattened to a single array + */ + precomputeWindow(elm: T, W: number): Group[] { + const { windows, windowSize } = calcWOpts(W, bits) + const points: T[] = [] + let p: T = elm + let base = p + for (let window = 0; window < windows; window++) { + base = p + points.push(base) + // i=1, bc we skip 0 + for (let i = 1; i < windowSize; i++) { + base = base.add(p) + points.push(base) + } + p = base.double() + } + return points + }, + + /** + * Implements ec multiplication using precomputed tables and w-ary non-adjacent form. + * @param W window size + * @param precomputes precomputed tables + * @param n scalar (we don't check here, but should be less than curve order) + * @returns real and fake (for const-time) points + */ + wNAF(W: number, precomputes: T[], n: bigint): { p: T; f: T } { + // Smaller version: + // https://github.com/paulmillr/noble-secp256k1/blob/47cb1669b6e506ad66b35fe7d76132ae97465da2/index.ts#L502-L541 + // TODO: check the scalar is less than group order? + // wNAF behavior is undefined otherwise. But have to carefully remove + // other checks before wNAF. 
ORDER == bits here. + // Accumulators + let p = c.ZERO + let f = c.BASE + // This code was first written with assumption that 'f' and 'p' will never be infinity point: + // since each addition is multiplied by 2 ** W, it cannot cancel each other. However, + // there is negate now: it is possible that negated element from low value + // would be the same as high element, which will create carry into next window. + // It's not obvious how this can fail, but still worth investigating later. + const wo = calcWOpts(W, bits) + for (let window = 0; window < wo.windows; window++) { + // (n === _0n) is handled and not early-exited. isEven and offsetF are used for noise + const { nextN, offset, isZero, isNeg, isNegF, offsetF } = calcOffsets(n, window, wo) + n = nextN + if (isZero) { + // bits are 0: add garbage to fake point + // Important part for const-time getPublicKey: add random "noise" point to f. + f = f.add(constTimeNegate(isNegF, precomputes[offsetF])) + } else { + // bits are 1: add to result point + p = p.add(constTimeNegate(isNeg, precomputes[offset])) + } + } + // Return both real and fake points: JIT won't eliminate f. + // At this point there is a way to F be infinity-point even if p is not, + // which makes it less const-time: around 1 bigint multiply. + return { p, f } + }, + + /** + * Implements ec unsafe (non const-time) multiplication using precomputed tables and w-ary non-adjacent form. + * @param W window size + * @param precomputes precomputed tables + * @param n scalar (we don't check here, but should be less than curve order) + * @param acc accumulator point to add result of multiplication + * @returns point + */ + wNAFUnsafe(W: number, precomputes: T[], n: bigint, acc: T = c.ZERO): T { + const wo = calcWOpts(W, bits) + for (let window = 0; window < wo.windows; window++) { + if (n === _0n) break // Early-exit, skip 0 value + const { nextN, offset, isZero, isNeg } = calcOffsets(n, window, wo) + n = nextN + if (isZero) { + // Window bits are 0: skip processing. + // Move to next window. + continue + } else { + const item = precomputes[offset] + acc = acc.add(isNeg ? item.negate() : item) // Re-using acc allows to save adds in MSM + } + } + return acc + }, + + getPrecomputes(W: number, P: T, transform: Mapper): T[] { + // Calculate precomputes on a first run, reuse them after + let comp = pointPrecomputes.get(P) + if (!comp) { + comp = this.precomputeWindow(P, W) as T[] + if (W !== 1) pointPrecomputes.set(P, transform(comp)) + } + return comp + }, + + wNAFCached(P: T, n: bigint, transform: Mapper): { p: T; f: T } { + const W = getW(P) + return this.wNAF(W, this.getPrecomputes(W, P, transform), n) + }, + + wNAFCachedUnsafe(P: T, n: bigint, transform: Mapper, prev?: T): T { + const W = getW(P) + if (W === 1) return this.unsafeLadder(P, n, prev) // For W=1 ladder is ~x2 faster + return this.wNAFUnsafe(W, this.getPrecomputes(W, P, transform), n, prev) + }, + + // We calculate precomputes for elliptic curve point multiplication + // using windowed method. This specifies window size and + // stores precomputed values. Usually only base point would be precomputed. + + setWindowSize(P: T, W: number) { + validateW(W, bits) + pointWindowSizes.set(P, W) + pointPrecomputes.delete(P) + }, + } +} + +/** + * Pippenger algorithm for multi-scalar multiplication (MSM, Pa + Qb + Rc + ...). + * 30x faster vs naive addition on L=4096, 10x faster than precomputes. + * For N=254bit, L=1, it does: 1024 ADD + 254 DBL. For L=5: 1536 ADD + 254 DBL. 
+ * Algorithmically constant-time (for same L), even when 1 point + scalar, or when scalar = 0. + * @param c Curve Point constructor + * @param fieldN field over CURVE.N - important that it's not over CURVE.P + * @param points array of L curve points + * @param scalars array of L scalars (aka private keys / bigints) + */ +export function pippenger>( + c: GroupConstructor, + fieldN: IField, + points: T[], + scalars: bigint[], +): T { + // If we split scalars by some window (let's say 8 bits), every chunk will only + // take 256 buckets even if there are 4096 scalars, also re-uses double. + // TODO: + // - https://eprint.iacr.org/2024/750.pdf + // - https://tches.iacr.org/index.php/TCHES/article/view/10287 + // 0 is accepted in scalars + validateMSMPoints(points, c) + validateMSMScalars(scalars, fieldN) + if (points.length !== scalars.length) + throw new Error('arrays of points and scalars must have equal length') + const zero = c.ZERO + const wbits = bitLen(BigInt(points.length)) + const windowSize = wbits > 12 ? wbits - 3 : wbits > 4 ? wbits - 2 : wbits ? 2 : 1 // in bits + const MASK = bitMask(windowSize) + const buckets = new Array(Number(MASK) + 1).fill(zero) // +1 for zero array + const lastBits = Math.floor((fieldN.BITS - 1) / windowSize) * windowSize + let sum = zero + for (let i = lastBits; i >= 0; i -= windowSize) { + buckets.fill(zero) + for (let j = 0; j < scalars.length; j++) { + const scalar = scalars[j] + const wbits = Number((scalar >> BigInt(i)) & MASK) + buckets[wbits] = buckets[wbits].add(points[j]) + } + let resI = zero // not using this will do small speed-up, but will lose ct + // Skip first bucket, because it is zero + for (let j = buckets.length - 1, sumI = zero; j > 0; j--) { + sumI = sumI.add(buckets[j]) + resI = resI.add(sumI) + } + sum = sum.add(resI) + if (i !== 0) for (let j = 0; j < windowSize; j++) sum = sum.double() + } + return sum as T +} +/** + * Precomputed multi-scalar multiplication (MSM, Pa + Qb + Rc + ...). + * @param c Curve Point constructor + * @param fieldN field over CURVE.N - important that it's not over CURVE.P + * @param points array of L curve points + * @returns function which multiplies points with scaars + */ +export function precomputeMSMUnsafe>( + c: GroupConstructor, + fieldN: IField, + points: T[], + windowSize: number, +): (scalars: bigint[]) => T { + /** + * Performance Analysis of Window-based Precomputation + * + * Base Case (256-bit scalar, 8-bit window): + * - Standard precomputation requires: + * - 31 additions per scalar × 256 scalars = 7,936 ops + * - Plus 255 summary additions = 8,191 total ops + * Note: Summary additions can be optimized via accumulator + * + * Chunked Precomputation Analysis: + * - Using 32 chunks requires: + * - 255 additions per chunk + * - 256 doublings + * - Total: (255 × 32) + 256 = 8,416 ops + * + * Memory Usage Comparison: + * Window Size | Standard Points | Chunked Points + * ------------|-----------------|--------------- + * 4-bit | 520 | 15 + * 8-bit | 4,224 | 255 + * 10-bit | 13,824 | 1,023 + * 16-bit | 557,056 | 65,535 + * + * Key Advantages: + * 1. Enables larger window sizes due to reduced memory overhead + * 2. 
More efficient for smaller scalar counts: + * - 16 chunks: (16 × 255) + 256 = 4,336 ops + * - ~2x faster than standard 8,191 ops + * + * Limitations: + * - Not suitable for plain precomputes (requires 256 constant doublings) + * - Performance degrades with larger scalar counts: + * - Optimal for ~256 scalars + * - Less efficient for 4096+ scalars (Pippenger preferred) + */ + validateW(windowSize, fieldN.BITS) + validateMSMPoints(points, c) + const zero = c.ZERO + const tableSize = 2 ** windowSize - 1 // table size (without zero) + const chunks = Math.ceil(fieldN.BITS / windowSize) // chunks of item + const MASK = bitMask(windowSize) + const tables = points.map((p: T) => { + const res = [] + for (let i = 0, acc = p; i < tableSize; i++) { + res.push(acc) + acc = acc.add(p) + } + return res + }) + return (scalars: bigint[]): T => { + validateMSMScalars(scalars, fieldN) + if (scalars.length > points.length) + throw new Error('array of scalars must be smaller than array of points') + let res = zero + for (let i = 0; i < chunks; i++) { + // No need to double if accumulator is still zero. + if (res !== zero) for (let j = 0; j < windowSize; j++) res = res.double() + const shiftBy = BigInt(chunks * windowSize - (i + 1) * windowSize) + for (let j = 0; j < scalars.length; j++) { + const n = scalars[j] + const curr = Number((n >> shiftBy) & MASK) + if (!curr) continue // skip zero scalars chunks + res = res.add(tables[j][curr - 1]) + } + } + return res + } +} + +/** + * Generic BasicCurve interface: works even for polynomial fields (BLS): P, n, h would be ok. + * Though generator can be different (Fp2 / Fp6 for BLS). + */ +export type BasicCurve = { + Fp: IField // Field over which we'll do calculations (Fp) + n: bigint // Curve order, total count of valid points in the field + nBitLength?: number // bit length of curve order + nByteLength?: number // byte length of curve order + h: bigint // cofactor. we can assign default=1, but users will just ignore it w/o validation + hEff?: bigint // Number to multiply to clear cofactor + Gx: T // base point X coordinate + Gy: T // base point Y coordinate + allowInfinityPoint?: boolean // bls12-381 requires it. ZERO point is valid, but invalid pubkey +} + +export function validateBasic( + curve: BasicCurve & T, +): Readonly< + { + readonly nBitLength: number + readonly nByteLength: number + } & BasicCurve & + T & { + p: bigint + } +> { + validateField(curve.Fp) + validateObject( + curve, + { + n: 'bigint', + h: 'bigint', + Gx: 'field', + Gy: 'field', + }, + { + nBitLength: 'isSafeInteger', + nByteLength: 'isSafeInteger', + }, + ) + // Set defaults + return Object.freeze({ + ...nLength(curve.n, curve.nBitLength), + ...curve, + ...{ p: curve.Fp.ORDER }, + } as const) +} diff --git a/packages/evm/src/precompiles/bls12_381/nbl/src/abstract/edwards.ts b/packages/evm/src/precompiles/bls12_381/nbl/src/abstract/edwards.ts new file mode 100644 index 00000000000..79a37fad3f9 --- /dev/null +++ b/packages/evm/src/precompiles/bls12_381/nbl/src/abstract/edwards.ts @@ -0,0 +1,567 @@ +/** + * Twisted Edwards curve. The formula is: ax² + y² = 1 + dx²y². + * For design rationale of types / exports, see weierstrass module documentation. + * @module + */ +/*! 
noble-curves - MIT License (c) 2022 Paul Miller (paulmillr.com) */ +import { + type AffinePoint, + type BasicCurve, + type Group, + type GroupConstructor, + pippenger, + validateBasic, + wNAF, +} from './curve.ts' +import { Field, mod } from './modular.ts' +// prettier-ignore +import { + type FHash, + type Hex, + aInRange, + abool, + bytesToHex, + bytesToNumberLE, + concatBytes, + ensureBytes, + memoized, + numberToBytesLE, + validateObject, +} from './utils.ts' + +// Be friendly to bad ECMAScript parsers by not using bigint literals +// prettier-ignore +const _0n = BigInt(0), + _1n = BigInt(1), + _2n = BigInt(2), + _8n = BigInt(8) + +/** Edwards curves must declare params a & d. */ +export type CurveType = BasicCurve & { + a: bigint // curve param a + d: bigint // curve param d + hash: FHash // Hashing + randomBytes: (bytesLength?: number) => Uint8Array // CSPRNG + adjustScalarBytes?: (bytes: Uint8Array) => Uint8Array // clears bits to get valid field elemtn + domain?: (data: Uint8Array, ctx: Uint8Array, phflag: boolean) => Uint8Array // Used for hashing + uvRatio?: (u: bigint, v: bigint) => { isValid: boolean; value: bigint } // Ratio √(u/v) + prehash?: FHash // RFC 8032 pre-hashing of messages to sign() / verify() + mapToCurve?: (scalar: bigint[]) => AffinePoint // for hash-to-curve standard +} + +export type CurveTypeWithLength = Readonly + +// verification rule is either zip215 or rfc8032 / nist186-5. Consult fromHex: +const VERIFY_DEFAULT = { zip215: true } + +function validateOpts(curve: CurveType): CurveTypeWithLength { + const opts = validateBasic(curve) + validateObject( + curve, + { + hash: 'function', + a: 'bigint', + d: 'bigint', + randomBytes: 'function', + }, + { + adjustScalarBytes: 'function', + domain: 'function', + uvRatio: 'function', + mapToCurve: 'function', + }, + ) + // Set defaults + return Object.freeze({ ...opts } as const) +} + +/** Instance of Extended Point with coordinates in X, Y, Z, T. */ +export interface ExtPointType extends Group { + readonly ex: bigint + readonly ey: bigint + readonly ez: bigint + readonly et: bigint + get x(): bigint + get y(): bigint + assertValidity(): void + multiply(scalar: bigint): ExtPointType + multiplyUnsafe(scalar: bigint): ExtPointType + isSmallOrder(): boolean + isTorsionFree(): boolean + clearCofactor(): ExtPointType + toAffine(iz?: bigint): AffinePoint + toRawBytes(isCompressed?: boolean): Uint8Array + toHex(isCompressed?: boolean): string + _setWindowSize(windowSize: number): void +} +/** Static methods of Extended Point with coordinates in X, Y, Z, T. */ +export interface ExtPointConstructor extends GroupConstructor { + new (x: bigint, y: bigint, z: bigint, t: bigint): ExtPointType + fromAffine(p: AffinePoint): ExtPointType + fromHex(hex: Hex): ExtPointType + fromPrivateKey(privateKey: Hex): ExtPointType + msm(points: ExtPointType[], scalars: bigint[]): ExtPointType +} + +/** + * Edwards Curve interface. + * Main methods: `getPublicKey(priv)`, `sign(msg, priv)`, `verify(sig, msg, pub)`. 
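+ * @example
+ * // minimal sketch; assumes `curve` was created via `twistedEdwards(...)` and `msg` is a Uint8Array
+ * const priv = curve.utils.randomPrivateKey();
+ * const pub = curve.getPublicKey(priv);
+ * const sig = curve.sign(msg, priv);
+ * const isValid = curve.verify(sig, msg, pub);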
+ */ +export type CurveFn = { + CURVE: ReturnType + getPublicKey: (privateKey: Hex) => Uint8Array + sign: (message: Hex, privateKey: Hex, options?: { context?: Hex }) => Uint8Array + verify: ( + sig: Hex, + message: Hex, + publicKey: Hex, + options?: { context?: Hex; zip215: boolean }, + ) => boolean + ExtendedPoint: ExtPointConstructor + utils: { + randomPrivateKey: () => Uint8Array + getExtendedPublicKey: (key: Hex) => { + head: Uint8Array + prefix: Uint8Array + scalar: bigint + point: ExtPointType + pointBytes: Uint8Array + } + precompute: (windowSize?: number, point?: ExtPointType) => ExtPointType + } +} + +/** + * Creates Twisted Edwards curve with EdDSA signatures. + * @example + * import { Field } from '@noble/curves/abstract/modular'; + * // Before that, define BigInt-s: a, d, p, n, Gx, Gy, h + * const curve = twistedEdwards({ a, d, Fp: Field(p), n, Gx, Gy, h }) + */ +export function twistedEdwards(curveDef: CurveType): CurveFn { + const CURVE = validateOpts(curveDef) as ReturnType + const { + Fp, + n: CURVE_ORDER, + prehash: prehash, + hash: cHash, + randomBytes, + nByteLength, + h: cofactor, + } = CURVE + // Important: + // There are some places where Fp.BYTES is used instead of nByteLength. + // So far, everything has been tested with curves of Fp.BYTES == nByteLength. + // TODO: test and find curves which behave otherwise. + const MASK = _2n << (BigInt(nByteLength * 8) - _1n) + const modP = Fp.create // Function overrides + const Fn = Field(CURVE.n, CURVE.nBitLength) + + // sqrt(u/v) + const uvRatio = + CURVE.uvRatio || + ((u: bigint, v: bigint) => { + try { + return { isValid: true, value: Fp.sqrt(u * Fp.inv(v)) } + } catch (e) { + return { isValid: false, value: _0n } + } + }) + const adjustScalarBytes = CURVE.adjustScalarBytes || ((bytes: Uint8Array) => bytes) // NOOP + const domain = + CURVE.domain || + ((data: Uint8Array, ctx: Uint8Array, phflag: boolean) => { + abool('phflag', phflag) + if (ctx.length || phflag) throw new Error('Contexts/pre-hash are not supported') + return data + }) // NOOP + // 0 <= n < MASK + // Coordinates larger than Fp.ORDER are allowed for zip215 + function aCoordinate(title: string, n: bigint, banZero = false) { + const min = banZero ? _1n : _0n + aInRange('coordinate ' + title, n, min, MASK) + } + + function aextpoint(other: unknown) { + if (!(other instanceof Point)) throw new Error('ExtendedPoint expected') + } + // Converts Extended point to default (x, y) coordinates. + // Can accept precomputed Z^-1 - for example, from invertBatch. + const toAffineMemo = memoized((p: Point, iz?: bigint): AffinePoint => { + const { ex: x, ey: y, ez: z } = p + const is0 = p.is0() + if (iz == null) iz = is0 ? _8n : (Fp.inv(z) as bigint) // 8 was chosen arbitrarily + const ax = modP(x * iz) + const ay = modP(y * iz) + const zz = modP(z * iz) + if (is0) return { x: _0n, y: _1n } + if (zz !== _1n) throw new Error('invZ was invalid') + return { x: ax, y: ay } + }) + const assertValidMemo = memoized((p: Point) => { + const { a, d } = CURVE + if (p.is0()) throw new Error('bad point: ZERO') // TODO: optimize, with vars below? 
+ // Equation in affine coordinates: ax² + y² = 1 + dx²y² + // Equation in projective coordinates (X/Z, Y/Z, Z): (aX² + Y²)Z² = Z⁴ + dX²Y² + const { ex: X, ey: Y, ez: Z, et: T } = p + const X2 = modP(X * X) // X² + const Y2 = modP(Y * Y) // Y² + const Z2 = modP(Z * Z) // Z² + const Z4 = modP(Z2 * Z2) // Z⁴ + const aX2 = modP(X2 * a) // aX² + const left = modP(Z2 * modP(aX2 + Y2)) // (aX² + Y²)Z² + const right = modP(Z4 + modP(d * modP(X2 * Y2))) // Z⁴ + dX²Y² + if (left !== right) throw new Error('bad point: equation left != right (1)') + // In Extended coordinates we also have T, which is x*y=T/Z: check X*Y == Z*T + const XY = modP(X * Y) + const ZT = modP(Z * T) + if (XY !== ZT) throw new Error('bad point: equation left != right (2)') + return true + }) + + // Extended Point works in extended coordinates: (x, y, z, t) ∋ (x=x/z, y=y/z, t=xy). + // https://en.wikipedia.org/wiki/Twisted_Edwards_curve#Extended_coordinates + class Point implements ExtPointType { + static readonly BASE = new Point(CURVE.Gx, CURVE.Gy, _1n, modP(CURVE.Gx * CURVE.Gy)) + static readonly ZERO = new Point(_0n, _1n, _1n, _0n) // 0, 1, 1, 0 + readonly ex: bigint + readonly ey: bigint + readonly ez: bigint + readonly et: bigint + + constructor(ex: bigint, ey: bigint, ez: bigint, et: bigint) { + aCoordinate('x', ex) + aCoordinate('y', ey) + aCoordinate('z', ez, true) + aCoordinate('t', et) + this.ex = ex + this.ey = ey + this.ez = ez + this.et = et + Object.freeze(this) + } + + get x(): bigint { + return this.toAffine().x + } + get y(): bigint { + return this.toAffine().y + } + + static fromAffine(p: AffinePoint): Point { + if (p instanceof Point) throw new Error('extended point not allowed') + const { x, y } = p || {} + aCoordinate('x', x) + aCoordinate('y', y) + return new Point(x, y, _1n, modP(x * y)) + } + static normalizeZ(points: Point[]): Point[] { + const toInv = Fp.invertBatch(points.map((p) => p.ez)) + return points.map((p, i) => p.toAffine(toInv[i])).map(Point.fromAffine) + } + // Multiscalar Multiplication + static msm(points: Point[], scalars: bigint[]): Point { + return pippenger(Point, Fn, points, scalars) + } + + // "Private method", don't use it directly + _setWindowSize(windowSize: number) { + wnaf.setWindowSize(this, windowSize) + } + // Not required for fromHex(), which always creates valid points. + // Could be useful for fromAffine(). + assertValidity(): void { + assertValidMemo(this) + } + + // Compare one point to another. + equals(other: Point): boolean { + aextpoint(other) + const { ex: X1, ey: Y1, ez: Z1 } = this + const { ex: X2, ey: Y2, ez: Z2 } = other + const X1Z2 = modP(X1 * Z2) + const X2Z1 = modP(X2 * Z1) + const Y1Z2 = modP(Y1 * Z2) + const Y2Z1 = modP(Y2 * Z1) + return X1Z2 === X2Z1 && Y1Z2 === Y2Z1 + } + + is0(): boolean { + return this.equals(Point.ZERO) + } + + negate(): Point { + // Flips point sign to a negative one (-x, y in affine coords) + return new Point(modP(-this.ex), this.ey, this.ez, modP(-this.et)) + } + + // Fast algo for doubling Extended Point. + // https://hyperelliptic.org/EFD/g1p/auto-twisted-extended.html#doubling-dbl-2008-hwcd + // Cost: 4M + 4S + 1*a + 6add + 1*2. 
+ double(): Point { + const { a } = CURVE + const { ex: X1, ey: Y1, ez: Z1 } = this + const A = modP(X1 * X1) // A = X12 + const B = modP(Y1 * Y1) // B = Y12 + const C = modP(_2n * modP(Z1 * Z1)) // C = 2*Z12 + const D = modP(a * A) // D = a*A + const x1y1 = X1 + Y1 + const E = modP(modP(x1y1 * x1y1) - A - B) // E = (X1+Y1)2-A-B + const G = D + B // G = D+B + const F = G - C // F = G-C + const H = D - B // H = D-B + const X3 = modP(E * F) // X3 = E*F + const Y3 = modP(G * H) // Y3 = G*H + const T3 = modP(E * H) // T3 = E*H + const Z3 = modP(F * G) // Z3 = F*G + return new Point(X3, Y3, Z3, T3) + } + + // Fast algo for adding 2 Extended Points. + // https://hyperelliptic.org/EFD/g1p/auto-twisted-extended.html#addition-add-2008-hwcd + // Cost: 9M + 1*a + 1*d + 7add. + add(other: Point) { + aextpoint(other) + const { a, d } = CURVE + const { ex: X1, ey: Y1, ez: Z1, et: T1 } = this + const { ex: X2, ey: Y2, ez: Z2, et: T2 } = other + const A = modP(X1 * X2) // A = X1*X2 + const B = modP(Y1 * Y2) // B = Y1*Y2 + const C = modP(T1 * d * T2) // C = T1*d*T2 + const D = modP(Z1 * Z2) // D = Z1*Z2 + const E = modP((X1 + Y1) * (X2 + Y2) - A - B) // E = (X1+Y1)*(X2+Y2)-A-B + const F = D - C // F = D-C + const G = D + C // G = D+C + const H = modP(B - a * A) // H = B-a*A + const X3 = modP(E * F) // X3 = E*F + const Y3 = modP(G * H) // Y3 = G*H + const T3 = modP(E * H) // T3 = E*H + const Z3 = modP(F * G) // Z3 = F*G + return new Point(X3, Y3, Z3, T3) + } + + subtract(other: Point): Point { + return this.add(other.negate()) + } + + private wNAF(n: bigint): { p: Point; f: Point } { + return wnaf.wNAFCached(this, n, Point.normalizeZ) + } + + // Constant-time multiplication. + multiply(scalar: bigint): Point { + const n = scalar + aInRange('scalar', n, _1n, CURVE_ORDER) // 1 <= scalar < L + const { p, f } = this.wNAF(n) + return Point.normalizeZ([p, f])[0] + } + + // Non-constant-time multiplication. Uses double-and-add algorithm. + // It's faster, but should only be used when you don't care about + // an exposed private key e.g. sig verification. + // Does NOT allow scalars higher than CURVE.n. + // Accepts optional accumulator to merge with multiply (important for sparse scalars) + multiplyUnsafe(scalar: bigint, acc = Point.ZERO): Point { + const n = scalar + aInRange('scalar', n, _0n, CURVE_ORDER) // 0 <= scalar < L + if (n === _0n) return I + if (this.is0() || n === _1n) return this + return wnaf.wNAFCachedUnsafe(this, n, Point.normalizeZ, acc) + } + + // Checks if point is of small order. + // If you add something to small order point, you will have "dirty" + // point with torsion component. + // Multiplies point by cofactor and checks if the result is 0. + isSmallOrder(): boolean { + return this.multiplyUnsafe(cofactor).is0() + } + + // Multiplies point by curve order and checks if the result is 0. + // Returns `false` is the point is dirty. + isTorsionFree(): boolean { + return wnaf.unsafeLadder(this, CURVE_ORDER).is0() + } + + // Converts Extended point to default (x, y) coordinates. + // Can accept precomputed Z^-1 - for example, from invertBatch. + toAffine(iz?: bigint): AffinePoint { + return toAffineMemo(this, iz) + } + + clearCofactor(): Point { + const { h: cofactor } = CURVE + if (cofactor === _1n) return this + return this.multiplyUnsafe(cofactor) + } + + // Converts hash string or Uint8Array to Point. + // Uses algo from RFC8032 5.1.3. 
+ static fromHex(hex: Hex, zip215 = false): Point { + const { d, a } = CURVE + const len = Fp.BYTES + hex = ensureBytes('pointHex', hex, len) // copy hex to a new array + abool('zip215', zip215) + const normed = hex.slice() // copy again, we'll manipulate it + const lastByte = hex[len - 1] // select last byte + normed[len - 1] = lastByte & ~0x80 // clear last bit + const y = bytesToNumberLE(normed) + + // zip215=true is good for consensus-critical apps. =false follows RFC8032 / NIST186-5. + // RFC8032 prohibits >= p, but ZIP215 doesn't + // zip215=true: 0 <= y < MASK (2^256 for ed25519) + // zip215=false: 0 <= y < P (2^255-19 for ed25519) + const max = zip215 ? MASK : Fp.ORDER + aInRange('pointHex.y', y, _0n, max) + + // Ed25519: x² = (y²-1)/(dy²+1) mod p. Ed448: x² = (y²-1)/(dy²-1) mod p. Generic case: + // ax²+y²=1+dx²y² => y²-1=dx²y²-ax² => y²-1=x²(dy²-a) => x²=(y²-1)/(dy²-a) + const y2 = modP(y * y) // denominator is always non-0 mod p. + const u = modP(y2 - _1n) // u = y² - 1 + const v = modP(d * y2 - a) // v = d y² + 1. + let { isValid, value: x } = uvRatio(u, v) // √(u/v) + if (!isValid) throw new Error('Point.fromHex: invalid y coordinate') + const isXOdd = (x & _1n) === _1n // There are 2 square roots. Use x_0 bit to select proper + const isLastByteOdd = (lastByte & 0x80) !== 0 // x_0, last bit + if (!zip215 && x === _0n && isLastByteOdd) + // if x=0 and x_0 = 1, fail + throw new Error('Point.fromHex: x=0 and x_0=1') + if (isLastByteOdd !== isXOdd) x = modP(-x) // if x_0 != x mod 2, set x = p-x + return Point.fromAffine({ x, y }) + } + static fromPrivateKey(privKey: Hex): Point { + const { scalar } = getPrivateScalar(privKey) + return G.multiply(scalar) // reduced one call of `toRawBytes` + } + toRawBytes(): Uint8Array { + const { x, y } = this.toAffine() + const bytes = numberToBytesLE(y, Fp.BYTES) // each y has 2 x values (x, -y) + bytes[bytes.length - 1] |= x & _1n ? 0x80 : 0 // when compressing, it's enough to store y + return bytes // and use the last byte to encode sign of x + } + toHex(): string { + return bytesToHex(this.toRawBytes()) // Same as toRawBytes, but returns string. + } + } + const { BASE: G, ZERO: I } = Point + const wnaf = wNAF(Point, nByteLength * 8) + + function modN(a: bigint) { + return mod(a, CURVE_ORDER) + } + // Little-endian SHA512 with modulo n + function modN_LE(hash: Uint8Array): bigint { + return modN(bytesToNumberLE(hash)) + } + + // Get the hashed private scalar per RFC8032 5.1.5 + function getPrivateScalar(key: Hex) { + const len = Fp.BYTES + key = ensureBytes('private key', key, len) + // Hash private key with curve's hash function to produce uniformingly random input + // Check byte lengths: ensure(64, h(ensure(32, key))) + const hashed = ensureBytes('hashed private key', cHash(key), 2 * len) + const head = adjustScalarBytes(hashed.slice(0, len)) // clear first half bits, produce FE + const prefix = hashed.slice(len, 2 * len) // second half is called key prefix (5.1.6) + const scalar = modN_LE(head) // The actual private scalar + return { head, prefix, scalar } + } + + // Convenience method that creates public key from scalar. RFC8032 5.1.5 + function getExtendedPublicKey(key: Hex) { + const { head, prefix, scalar } = getPrivateScalar(key) + const point = G.multiply(scalar) // Point on Edwards curve aka public key + const pointBytes = point.toRawBytes() // Uint8Array representation + return { head, prefix, scalar, point, pointBytes } + } + + // Calculates EdDSA pub key. RFC8032 5.1.5. Privkey is hashed. 
Use first half with 3 bits cleared + function getPublicKey(privKey: Hex): Uint8Array { + return getExtendedPublicKey(privKey).pointBytes + } + + // int('LE', SHA512(dom2(F, C) || msgs)) mod N + function hashDomainToScalar(context: Hex = new Uint8Array(), ...msgs: Uint8Array[]) { + const msg = concatBytes(...msgs) + return modN_LE(cHash(domain(msg, ensureBytes('context', context), !!prehash))) + } + + /** Signs message with privateKey. RFC8032 5.1.6 */ + function sign(msg: Hex, privKey: Hex, options: { context?: Hex } = {}): Uint8Array { + msg = ensureBytes('message', msg) + if (prehash) msg = prehash(msg) // for ed25519ph etc. + const { prefix, scalar, pointBytes } = getExtendedPublicKey(privKey) + const r = hashDomainToScalar(options.context, prefix, msg) // r = dom2(F, C) || prefix || PH(M) + const R = G.multiply(r).toRawBytes() // R = rG + const k = hashDomainToScalar(options.context, R, pointBytes, msg) // R || A || PH(M) + const s = modN(r + k * scalar) // S = (r + k * s) mod L + aInRange('signature.s', s, _0n, CURVE_ORDER) // 0 <= s < l + const res = concatBytes(R, numberToBytesLE(s, Fp.BYTES)) + return ensureBytes('result', res, Fp.BYTES * 2) // 64-byte signature + } + + const verifyOpts: { context?: Hex; zip215?: boolean } = VERIFY_DEFAULT + + /** + * Verifies EdDSA signature against message and public key. RFC8032 5.1.7. + * An extended group equation is checked. + */ + function verify(sig: Hex, msg: Hex, publicKey: Hex, options = verifyOpts): boolean { + const { context, zip215 } = options + const len = Fp.BYTES // Verifies EdDSA signature against message and public key. RFC8032 5.1.7. + sig = ensureBytes('signature', sig, 2 * len) // An extended group equation is checked. + msg = ensureBytes('message', msg) + publicKey = ensureBytes('publicKey', publicKey, len) + if (zip215 !== undefined) abool('zip215', zip215) + if (prehash) msg = prehash(msg) // for ed25519ph, etc + + const s = bytesToNumberLE(sig.slice(len, 2 * len)) + let A, R, SB + try { + // zip215=true is good for consensus-critical apps. =false follows RFC8032 / NIST186-5. + // zip215=true: 0 <= y < MASK (2^256 for ed25519) + // zip215=false: 0 <= y < P (2^255-19 for ed25519) + A = Point.fromHex(publicKey, zip215) + R = Point.fromHex(sig.slice(0, len), zip215) + SB = G.multiplyUnsafe(s) // 0 <= s < l is done inside + } catch (error) { + return false + } + if (!zip215 && A.isSmallOrder()) return false + + const k = hashDomainToScalar(context, R.toRawBytes(), A.toRawBytes(), msg) + const RkA = R.add(A.multiplyUnsafe(k)) + // Extended group equation + // [8][S]B = [8]R + [8][k]A' + return RkA.subtract(SB).clearCofactor().equals(Point.ZERO) + } + + G._setWindowSize(8) // Enable precomputes. Slows down first publicKey computation by 20ms. + + const utils = { + getExtendedPublicKey, + /** ed25519 priv keys are uniform 32b. No need to check for modulo bias, like in secp256k1. */ + randomPrivateKey: (): Uint8Array => randomBytes(Fp.BYTES), + + /** + * We're doing scalar multiplication (used in getPublicKey etc) with precomputed BASE_POINT + * values. This slows down first getPublicKey() by milliseconds (see Speed section), + * but allows to speed-up subsequent getPublicKey() calls up to 20x. 
+ * @param windowSize 2, 4, 8, 16 + */ + precompute(windowSize = 8, point: ExtPointType = Point.BASE): ExtPointType { + point._setWindowSize(windowSize) + point.multiply(BigInt(3)) + return point + }, + } + + return { + CURVE, + getPublicKey, + sign, + verify, + ExtendedPoint: Point, + utils, + } +} diff --git a/packages/evm/src/precompiles/bls12_381/nbl/src/abstract/hash-to-curve.ts b/packages/evm/src/precompiles/bls12_381/nbl/src/abstract/hash-to-curve.ts new file mode 100644 index 00000000000..7ce3aad8cb2 --- /dev/null +++ b/packages/evm/src/precompiles/bls12_381/nbl/src/abstract/hash-to-curve.ts @@ -0,0 +1,262 @@ +/** + * hash-to-curve from RFC 9380. + * Hashes arbitrary-length byte strings to a list of one or more elements of a finite field F. + * https://www.rfc-editor.org/rfc/rfc9380 + * @module + */ +/*! noble-curves - MIT License (c) 2022 Paul Miller (paulmillr.com) */ +import type { AffinePoint, Group, GroupConstructor } from './curve.ts' +import { type IField, mod } from './modular.ts' +import type { CHash } from './utils.ts' +import { abytes, bytesToNumberBE, concatBytes, utf8ToBytes, validateObject } from './utils.ts' + +export type UnicodeOrBytes = string | Uint8Array + +/** + * * `DST` is a domain separation tag, defined in section 2.2.5 + * * `p` characteristic of F, where F is a finite field of characteristic p and order q = p^m + * * `m` is extension degree (1 for prime fields) + * * `k` is the target security target in bits (e.g. 128), from section 5.1 + * * `expand` is `xmd` (SHA2, SHA3, BLAKE) or `xof` (SHAKE, BLAKE-XOF) + * * `hash` conforming to `utils.CHash` interface, with `outputLen` / `blockLen` props + */ +export type Opts = { + DST: UnicodeOrBytes + p: bigint + m: number + k: number + expand: 'xmd' | 'xof' + hash: CHash +} + +// Octet Stream to Integer. "spec" implementation of os2ip is 2.5x slower vs bytesToNumberBE. +const os2ip = bytesToNumberBE + +// Integer to Octet Stream (numberToBytesBE) +function i2osp(value: number, length: number): Uint8Array { + anum(value) + anum(length) + if (value < 0 || value >= 1 << (8 * length)) throw new Error('invalid I2OSP input: ' + value) + const res = Array.from({ length }).fill(0) as number[] + for (let i = length - 1; i >= 0; i--) { + res[i] = value & 0xff + value >>>= 8 + } + return new Uint8Array(res) +} + +function strxor(a: Uint8Array, b: Uint8Array): Uint8Array { + const arr = new Uint8Array(a.length) + for (let i = 0; i < a.length; i++) { + arr[i] = a[i] ^ b[i] + } + return arr +} + +function anum(item: unknown): void { + if (!Number.isSafeInteger(item)) throw new Error('number expected') +} + +/** + * Produces a uniformly random byte string using a cryptographic hash function H that outputs b bits. + * [RFC 9380 5.3.1](https://www.rfc-editor.org/rfc/rfc9380#section-5.3.1). 
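+ * Illustrative sizing (not from upstream docs): with a 32-byte hash such as SHA-256 and lenInBytes = 96,
+ * ell = ceil(96 / 32) = 3 output blocks are needed; the concatenated blocks are truncated to exactly lenInBytes bytes.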
+ */ +export function expand_message_xmd( + msg: Uint8Array, + DST: Uint8Array, + lenInBytes: number, + H: CHash, +): Uint8Array { + abytes(msg) + abytes(DST) + anum(lenInBytes) + // https://www.rfc-editor.org/rfc/rfc9380#section-5.3.3 + if (DST.length > 255) DST = H(concatBytes(utf8ToBytes('H2C-OVERSIZE-DST-'), DST)) + const { outputLen: b_in_bytes, blockLen: r_in_bytes } = H + const ell = Math.ceil(lenInBytes / b_in_bytes) + if (lenInBytes > 65535 || ell > 255) throw new Error('expand_message_xmd: invalid lenInBytes') + const DST_prime = concatBytes(DST, i2osp(DST.length, 1)) + const Z_pad = i2osp(0, r_in_bytes) + const l_i_b_str = i2osp(lenInBytes, 2) // len_in_bytes_str + const b = new Array(ell) + const b_0 = H(concatBytes(Z_pad, msg, l_i_b_str, i2osp(0, 1), DST_prime)) + b[0] = H(concatBytes(b_0, i2osp(1, 1), DST_prime)) + for (let i = 1; i <= ell; i++) { + const args = [strxor(b_0, b[i - 1]), i2osp(i + 1, 1), DST_prime] + b[i] = H(concatBytes(...args)) + } + const pseudo_random_bytes = concatBytes(...b) + return pseudo_random_bytes.slice(0, lenInBytes) +} + +/** + * Produces a uniformly random byte string using an extendable-output function (XOF) H. + * 1. The collision resistance of H MUST be at least k bits. + * 2. H MUST be an XOF that has been proved indifferentiable from + * a random oracle under a reasonable cryptographic assumption. + * [RFC 9380 5.3.2](https://www.rfc-editor.org/rfc/rfc9380#section-5.3.2). + */ +export function expand_message_xof( + msg: Uint8Array, + DST: Uint8Array, + lenInBytes: number, + k: number, + H: CHash, +): Uint8Array { + abytes(msg) + abytes(DST) + anum(lenInBytes) + // https://www.rfc-editor.org/rfc/rfc9380#section-5.3.3 + // DST = H('H2C-OVERSIZE-DST-' || a_very_long_DST, Math.ceil((lenInBytes * k) / 8)); + if (DST.length > 255) { + const dkLen = Math.ceil((2 * k) / 8) + DST = H.create({ dkLen }).update(utf8ToBytes('H2C-OVERSIZE-DST-')).update(DST).digest() + } + if (lenInBytes > 65535 || DST.length > 255) + throw new Error('expand_message_xof: invalid lenInBytes') + return ( + H.create({ dkLen: lenInBytes }) + .update(msg) + .update(i2osp(lenInBytes, 2)) + // 2. DST_prime = DST || I2OSP(len(DST), 1) + .update(DST) + .update(i2osp(DST.length, 1)) + .digest() + ) +} + +/** + * Hashes arbitrary-length byte strings to a list of one or more elements of a finite field F. + * [RFC 9380 5.2](https://www.rfc-editor.org/rfc/rfc9380#section-5.2). + * @param msg a byte string containing the message to hash + * @param count the number of elements of F to output + * @param options `{DST: string, p: bigint, m: number, k: number, expand: 'xmd' | 'xof', hash: H}`, see above + * @returns [u_0, ..., u_(count - 1)], a list of field elements. + */ +export function hash_to_field(msg: Uint8Array, count: number, options: Opts): bigint[][] { + validateObject(options, { + DST: 'stringOrUint8Array', + p: 'bigint', + m: 'isSafeInteger', + k: 'isSafeInteger', + hash: 'hash', + }) + const { p, k, m, hash, expand, DST: _DST } = options + abytes(msg) + anum(count) + const DST = typeof _DST === 'string' ? 
utf8ToBytes(_DST) : _DST
+  const log2p = p.toString(2).length
+  const L = Math.ceil((log2p + k) / 8) // section 5.1 of ietf draft link above
+  const len_in_bytes = count * m * L
+  let prb // pseudo_random_bytes
+  if (expand === 'xmd') {
+    prb = expand_message_xmd(msg, DST, len_in_bytes, hash)
+  } else if (expand === 'xof') {
+    prb = expand_message_xof(msg, DST, len_in_bytes, k, hash)
+  } else if (expand === '_internal_pass') {
+    // for internal tests only
+    prb = msg
+  } else {
+    throw new Error('expand must be "xmd" or "xof"')
+  }
+  const u = new Array(count)
+  for (let i = 0; i < count; i++) {
+    const e = new Array(m)
+    for (let j = 0; j < m; j++) {
+      const elm_offset = L * (j + i * m)
+      const tv = prb.subarray(elm_offset, elm_offset + L)
+      e[j] = mod(os2ip(tv), p)
+    }
+    u[i] = e
+  }
+  return u
+}
+
+export type XY<T> = (
+  x: T,
+  y: T,
+) => {
+  x: T
+  y: T
+}
+export function isogenyMap<T, F extends IField<T>>(field: F, map: [T[], T[], T[], T[]]): XY<T> {
+  // Make same order as in spec
+  const COEFF = map.map((i) => Array.from(i).reverse())
+  return (x: T, y: T) => {
+    const [xNum, xDen, yNum, yDen] = COEFF.map((val) =>
+      val.reduce((acc, i) => field.add(field.mul(acc, x), i)),
+    )
+    if (field.is0(xDen) || field.is0(yDen)) throw new Error('bad point: ZERO')
+    x = field.div(xNum, xDen) // xNum / xDen
+    y = field.mul(y, field.div(yNum, yDen)) // y * (yNum / yDen)
+    return { x: x, y: y }
+  }
+}
+
+/** Point interface, which curves must implement to work correctly with the module. */
+export interface H2CPoint<T> extends Group<H2CPoint<T>> {
+  add(rhs: H2CPoint<T>): H2CPoint<T>
+  toAffine(iz?: bigint): AffinePoint<T>
+  clearCofactor(): H2CPoint<T>
+  assertValidity(): void
+}
+
+export interface H2CPointConstructor<T> extends GroupConstructor<H2CPoint<T>> {
+  fromAffine(ap: AffinePoint<T>): H2CPoint<T>
+}
+
+export type MapToCurve<T> = (scalar: bigint[]) => AffinePoint<T>
+
+// Separated from initialization opts, so users won't accidentally change per-curve parameters
+// (changing DST is ok!)
+export type htfBasicOpts = { DST: UnicodeOrBytes }
+export type HTFMethod<T> = (msg: Uint8Array, options?: htfBasicOpts) => H2CPoint<T>
+export type MapMethod<T> = (scalars: bigint[]) => H2CPoint<T>
+export type Hasher<T> = {
+  hashToCurve: HTFMethod<T>
+  encodeToCurve: HTFMethod<T>
+  mapToCurve: MapMethod<T>
+  defaults: Opts & { encodeDST?: UnicodeOrBytes }
+}
+
+/** Creates hash-to-curve methods from EC Point and mapToCurve function. */
+export function createHasher<T>(
+  Point: H2CPointConstructor<T>,
+  mapToCurve: MapToCurve<T>,
+  defaults: Opts & { encodeDST?: UnicodeOrBytes },
+): Hasher<T> {
+  if (typeof mapToCurve !== 'function') throw new Error('mapToCurve() must be defined')
+  return {
+    defaults,
+
+    // Encodes byte string to elliptic curve.
+    // hash_to_curve from https://www.rfc-editor.org/rfc/rfc9380#section-3
+    hashToCurve(msg: Uint8Array, options?: htfBasicOpts): H2CPoint<T> {
+      const u = hash_to_field(msg, 2, { ...defaults, DST: defaults.DST, ...options } as Opts)
+      const u0 = Point.fromAffine(mapToCurve(u[0]))
+      const u1 = Point.fromAffine(mapToCurve(u[1]))
+      const P = u0.add(u1).clearCofactor()
+      P.assertValidity()
+      return P
+    },
+
+    // Encodes byte string to elliptic curve.
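+    // Illustrative note: encode_to_curve is the nonuniform variant. It maps a single hash_to_field
+    // element, so its outputs are not uniformly distributed over the curve (hash_to_curve uses two).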
+ // encode_to_curve from https://www.rfc-editor.org/rfc/rfc9380#section-3 + encodeToCurve(msg: Uint8Array, options?: htfBasicOpts): H2CPoint { + const u = hash_to_field(msg, 1, { ...defaults, DST: defaults.encodeDST, ...options } as Opts) + const P = Point.fromAffine(mapToCurve(u[0])).clearCofactor() + P.assertValidity() + return P + }, + // Same as encodeToCurve, but without hash + mapToCurve(scalars: bigint[]): H2CPoint { + if (!Array.isArray(scalars)) throw new Error('mapToCurve: expected array of bigints') + for (const i of scalars) + if (typeof i !== 'bigint') throw new Error('mapToCurve: expected array of bigints') + const P = Point.fromAffine(mapToCurve(scalars)).clearCofactor() + P.assertValidity() + return P + }, + } +} diff --git a/packages/evm/src/precompiles/bls12_381/nbl/src/abstract/modular.ts b/packages/evm/src/precompiles/bls12_381/nbl/src/abstract/modular.ts new file mode 100644 index 00000000000..8c0b703e745 --- /dev/null +++ b/packages/evm/src/precompiles/bls12_381/nbl/src/abstract/modular.ts @@ -0,0 +1,555 @@ +/** + * Utils for modular division and finite fields. + * A finite field over 11 is integer number operations `mod 11`. + * There is no division: it is replaced by modular multiplicative inverse. + * @module + */ +/*! noble-curves - MIT License (c) 2022 Paul Miller (paulmillr.com) */ +import { + bitMask, + bytesToNumberBE, + bytesToNumberLE, + ensureBytes, + numberToBytesBE, + numberToBytesLE, + validateObject, +} from './utils.ts' + +// prettier-ignore +const _0n = BigInt(0), + _1n = BigInt(1), + _2n = /* @__PURE__ */ BigInt(2), + _3n = /* @__PURE__ */ BigInt(3) +// prettier-ignore +const _4n = /* @__PURE__ */ BigInt(4), + _5n = /* @__PURE__ */ BigInt(5), + _8n = /* @__PURE__ */ BigInt(8) +// prettier-ignore +const _9n = /* @__PURE__ */ BigInt(9), + _16n = /* @__PURE__ */ BigInt(16) + +// Calculates a modulo b +export function mod(a: bigint, b: bigint): bigint { + const result = a % b + return result >= _0n ? result : b + result +} +/** + * Efficiently raise num to power and do modular division. + * Unsafe in some contexts: uses ladder, so can expose bigint bits. + * @todo use field version && remove + * @example + * pow(2n, 6n, 11n) // 64n % 11n == 9n + */ +export function pow(num: bigint, power: bigint, modulo: bigint): bigint { + if (power < _0n) throw new Error('invalid exponent, negatives unsupported') + if (modulo <= _0n) throw new Error('invalid modulus') + if (modulo === _1n) return _0n + let res = _1n + while (power > _0n) { + if (power & _1n) res = (res * num) % modulo + num = (num * num) % modulo + power >>= _1n + } + return res +} + +/** Does `x^(2^power)` mod p. `pow2(30, 4)` == `30^(2^4)` */ +export function pow2(x: bigint, power: bigint, modulo: bigint): bigint { + let res = x + while (power-- > _0n) { + res *= res + res %= modulo + } + return res +} + +/** + * Inverses number over modulo. + * Implemented using [Euclidean GCD](https://brilliant.org/wiki/extended-euclidean-algorithm/). + */ +export function invert(number: bigint, modulo: bigint): bigint { + if (number === _0n) throw new Error('invert: expected non-zero number') + if (modulo <= _0n) throw new Error('invert: expected positive modulus, got ' + modulo) + // Fermat's little theorem "CT-like" version inv(n) = n^(m-2) mod m is 30x slower. 
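+  // Worked example (illustrative): invert(3n, 11n) === 4n, since 3 * 4 = 12 ≡ 1 (mod 11).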
+ let a = mod(number, modulo) + let b = modulo + // prettier-ignore + let x = _0n, + y = _1n, + u = _1n, + v = _0n + while (a !== _0n) { + // JIT applies optimization if those two lines follow each other + const q = b / a + const r = b % a + const m = x - u * q + const n = y - v * q + // prettier-ignore + ;(b = a), (a = r), (x = u), (y = v), (u = m), (v = n) + } + const gcd = b + if (gcd !== _1n) throw new Error('invert: does not exist') + return mod(x, modulo) +} + +/** + * Tonelli-Shanks square root search algorithm. + * 1. https://eprint.iacr.org/2012/685.pdf (page 12) + * 2. Square Roots from 1; 24, 51, 10 to Dan Shanks + * Will start an infinite loop if field order P is not prime. + * @param P field order + * @returns function that takes field Fp (created from P) and number n + */ +export function tonelliShanks(P: bigint): (Fp: IField, n: T) => T { + // Legendre constant: used to calculate Legendre symbol (a | p), + // which denotes the value of a^((p-1)/2) (mod p). + // (a | p) ≡ 1 if a is a square (mod p) + // (a | p) ≡ -1 if a is not a square (mod p) + // (a | p) ≡ 0 if a ≡ 0 (mod p) + const legendreC = (P - _1n) / _2n + + let Q: bigint, S: number, Z: bigint + // Step 1: By factoring out powers of 2 from p - 1, + // find q and s such that p - 1 = q*(2^s) with q odd + for (Q = P - _1n, S = 0; Q % _2n === _0n; Q /= _2n, S++); + + // Step 2: Select a non-square z such that (z | p) ≡ -1 and set c ≡ zq + for (Z = _2n; Z < P && pow(Z, legendreC, P) !== P - _1n; Z++) { + // Crash instead of infinity loop, we cannot reasonable count until P. + if (Z > 1000) throw new Error('Cannot find square root: likely non-prime P') + } + + // Fast-path + if (S === 1) { + const p1div4 = (P + _1n) / _4n + return function tonelliFast(Fp: IField, n: T) { + const root = Fp.pow(n, p1div4) + if (!Fp.eql(Fp.sqr(root), n)) throw new Error('Cannot find square root') + return root + } + } + + // Slow-path + const Q1div2 = (Q + _1n) / _2n + return function tonelliSlow(Fp: IField, n: T): T { + // Step 0: Check that n is indeed a square: (n | p) should not be ≡ -1 + if (Fp.pow(n, legendreC) === Fp.neg(Fp.ONE)) throw new Error('Cannot find square root') + let r = S + // TODO: will fail at Fp2/etc + let g = Fp.pow(Fp.mul(Fp.ONE, Z), Q) // will update both x and b + let x = Fp.pow(n, Q1div2) // first guess at the square root + let b = Fp.pow(n, Q) // first guess at the fudge factor + + while (!Fp.eql(b, Fp.ONE)) { + if (Fp.eql(b, Fp.ZERO)) return Fp.ZERO // https://en.wikipedia.org/wiki/Tonelli%E2%80%93Shanks_algorithm (4. If t = 0, return r = 0) + // Find m such b^(2^m)==1 + let m = 1 + for (let t2 = Fp.sqr(b); m < r; m++) { + if (Fp.eql(t2, Fp.ONE)) break + t2 = Fp.sqr(t2) // t2 *= t2 + } + // NOTE: r-m-1 can be bigger than 32, need to convert to bigint before shift, otherwise there will be overflow + const ge = Fp.pow(g, _1n << BigInt(r - m - 1)) // ge = 2^(r-m-1) + g = Fp.sqr(ge) // g = ge * ge + x = Fp.mul(x, ge) // x *= ge + b = Fp.mul(b, g) // b *= g + r = m + } + return x + } +} + +/** + * Square root for a finite field. It will try to check if optimizations are applicable and fall back to 4: + * + * 1. P ≡ 3 (mod 4) + * 2. P ≡ 5 (mod 8) + * 3. P ≡ 9 (mod 16) + * 4. Tonelli-Shanks algorithm + * + * Different algorithms can give different roots, it is up to user to decide which one they want. + * For example there is FpSqrtOdd/FpSqrtEven to choice root based on oddness (used for hash-to-curve). 
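+ * Worked example (illustrative) for case 1: with P = 11 (P ≡ 3 mod 4), √n = n^((P+1)/4), so
+ * √4 = 4³ mod 11 = 9, and indeed 9² = 81 ≡ 4 (mod 11).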
+ */ +export function FpSqrt(P: bigint): (Fp: IField, n: T) => T { + // P ≡ 3 (mod 4) + // √n = n^((P+1)/4) + if (P % _4n === _3n) { + // Not all roots possible! + // const ORDER = + // 0x1a0111ea397fe69a4b1ba7b6434bacd764774b84f38512bf6730d2a0f6b0f6241eabfffeb153ffffb9feffffffffaaabn; + // const NUM = 72057594037927816n; + const p1div4 = (P + _1n) / _4n + return function sqrt3mod4(Fp: IField, n: T) { + const root = Fp.pow(n, p1div4) + // Throw if root**2 != n + if (!Fp.eql(Fp.sqr(root), n)) throw new Error('Cannot find square root') + return root + } + } + + // Atkin algorithm for q ≡ 5 (mod 8), https://eprint.iacr.org/2012/685.pdf (page 10) + if (P % _8n === _5n) { + const c1 = (P - _5n) / _8n + return function sqrt5mod8(Fp: IField, n: T) { + const n2 = Fp.mul(n, _2n) + const v = Fp.pow(n2, c1) + const nv = Fp.mul(n, v) + const i = Fp.mul(Fp.mul(nv, _2n), v) + const root = Fp.mul(nv, Fp.sub(i, Fp.ONE)) + if (!Fp.eql(Fp.sqr(root), n)) throw new Error('Cannot find square root') + return root + } + } + + // P ≡ 9 (mod 16) + if (P % _16n === _9n) { + // NOTE: tonelli is too slow for bls-Fp2 calculations even on start + // Means we cannot use sqrt for constants at all! + // + // const c1 = Fp.sqrt(Fp.negate(Fp.ONE)); // 1. c1 = sqrt(-1) in F, i.e., (c1^2) == -1 in F + // const c2 = Fp.sqrt(c1); // 2. c2 = sqrt(c1) in F, i.e., (c2^2) == c1 in F + // const c3 = Fp.sqrt(Fp.negate(c1)); // 3. c3 = sqrt(-c1) in F, i.e., (c3^2) == -c1 in F + // const c4 = (P + _7n) / _16n; // 4. c4 = (q + 7) / 16 # Integer arithmetic + // sqrt = (x) => { + // let tv1 = Fp.pow(x, c4); // 1. tv1 = x^c4 + // let tv2 = Fp.mul(c1, tv1); // 2. tv2 = c1 * tv1 + // const tv3 = Fp.mul(c2, tv1); // 3. tv3 = c2 * tv1 + // let tv4 = Fp.mul(c3, tv1); // 4. tv4 = c3 * tv1 + // const e1 = Fp.equals(Fp.square(tv2), x); // 5. e1 = (tv2^2) == x + // const e2 = Fp.equals(Fp.square(tv3), x); // 6. e2 = (tv3^2) == x + // tv1 = Fp.cmov(tv1, tv2, e1); // 7. tv1 = CMOV(tv1, tv2, e1) # Select tv2 if (tv2^2) == x + // tv2 = Fp.cmov(tv4, tv3, e2); // 8. tv2 = CMOV(tv4, tv3, e2) # Select tv3 if (tv3^2) == x + // const e3 = Fp.equals(Fp.square(tv2), x); // 9. e3 = (tv2^2) == x + // return Fp.cmov(tv1, tv2, e3); // 10. z = CMOV(tv1, tv2, e3) # Select the sqrt from tv1 and tv2 + // } + } + // Other cases: Tonelli-Shanks algorithm + return tonelliShanks(P) +} + +// Little-endian check for first LE bit (last BE bit); +export const isNegativeLE = (num: bigint, modulo: bigint): boolean => + (mod(num, modulo) & _1n) === _1n + +/** Field is not always over prime: for example, Fp2 has ORDER(q)=p^m. */ +export interface IField { + ORDER: bigint + isLE: boolean + BYTES: number + BITS: number + MASK: bigint + ZERO: T + ONE: T + // 1-arg + create: (num: T) => T + isValid: (num: T) => boolean + is0: (num: T) => boolean + neg(num: T): T + inv(num: T): T + sqrt(num: T): T + sqr(num: T): T + // 2-args + eql(lhs: T, rhs: T): boolean + add(lhs: T, rhs: T): T + sub(lhs: T, rhs: T): T + mul(lhs: T, rhs: T | bigint): T + pow(lhs: T, power: bigint): T + div(lhs: T, rhs: T | bigint): T + // N for NonNormalized (for now) + addN(lhs: T, rhs: T): T + subN(lhs: T, rhs: T): T + mulN(lhs: T, rhs: T | bigint): T + sqrN(num: T): T + + // Optional + // Should be same as sgn0 function in + // [RFC9380](https://www.rfc-editor.org/rfc/rfc9380#section-4.1). + // NOTE: sgn0 is 'negative in LE', which is same as odd. And negative in LE is kinda strange definition anyway. 
+ isOdd?(num: T): boolean // Odd instead of even since we have it for Fp2 + // legendre?(num: T): T; + pow(lhs: T, power: bigint): T + invertBatch: (lst: T[]) => T[] + toBytes(num: T): Uint8Array + fromBytes(bytes: Uint8Array): T + // If c is False, CMOV returns a, otherwise it returns b. + cmov(a: T, b: T, c: boolean): T +} +// prettier-ignore +const FIELD_FIELDS = [ + 'create', + 'isValid', + 'is0', + 'neg', + 'inv', + 'sqrt', + 'sqr', + 'eql', + 'add', + 'sub', + 'mul', + 'pow', + 'div', + 'addN', + 'subN', + 'mulN', + 'sqrN', +] as const +export function validateField(field: IField): IField { + const initial = { + ORDER: 'bigint', + MASK: 'bigint', + BYTES: 'isSafeInteger', + BITS: 'isSafeInteger', + } as Record + const opts = FIELD_FIELDS.reduce((map, val: string) => { + map[val] = 'function' + return map + }, initial) + return validateObject(field, opts) +} + +// Generic field functions + +/** + * Same as `pow` but for Fp: non-constant-time. + * Unsafe in some contexts: uses ladder, so can expose bigint bits. + */ +export function FpPow(f: IField, num: T, power: bigint): T { + // Should have same speed as pow for bigints + // TODO: benchmark! + if (power < _0n) throw new Error('invalid exponent, negatives unsupported') + if (power === _0n) return f.ONE + if (power === _1n) return num + let p = f.ONE + let d = num + while (power > _0n) { + if (power & _1n) p = f.mul(p, d) + d = f.sqr(d) + power >>= _1n + } + return p +} + +/** + * Efficiently invert an array of Field elements. + * `inv(0)` will return `undefined` here: make sure to throw an error. + */ +export function FpInvertBatch(f: IField, nums: T[]): T[] { + const tmp = new Array(nums.length) + // Walk from first to last, multiply them by each other MOD p + const lastMultiplied = nums.reduce((acc, num, i) => { + if (f.is0(num)) return acc + tmp[i] = acc + return f.mul(acc, num) + }, f.ONE) + // Invert last element + const inverted = f.inv(lastMultiplied) + // Walk from last to first, multiply them by inverted each other MOD p + nums.reduceRight((acc, num, i) => { + if (f.is0(num)) return acc + tmp[i] = f.mul(acc, tmp[i]) + return f.mul(acc, num) + }, inverted) + return tmp +} + +export function FpDiv(f: IField, lhs: T, rhs: T | bigint): T { + return f.mul(lhs, typeof rhs === 'bigint' ? invert(rhs, f.ORDER) : f.inv(rhs)) +} + +/** + * Legendre symbol. + * * (a | p) ≡ 1 if a is a square (mod p), quadratic residue + * * (a | p) ≡ -1 if a is not a square (mod p), quadratic non residue + * * (a | p) ≡ 0 if a ≡ 0 (mod p) + */ +export function FpLegendre(order: bigint): (f: IField, x: T) => T { + const legendreConst = (order - _1n) / _2n // Integer arithmetic + return (f: IField, x: T): T => f.pow(x, legendreConst) +} + +// This function returns True whenever the value x is a square in the field F. +export function FpIsSquare(f: IField): (x: T) => boolean { + const legendre = FpLegendre(f.ORDER) + return (x: T): boolean => { + const p = legendre(f, x) + return f.eql(p, f.ZERO) || f.eql(p, f.ONE) + } +} + +// CURVE.n lengths +export function nLength( + n: bigint, + nBitLength?: number, +): { + nBitLength: number + nByteLength: number +} { + // Bit size, byte size of CURVE.n + const _nBitLength = nBitLength !== undefined ? nBitLength : n.toString(2).length + const nByteLength = Math.ceil(_nBitLength / 8) + return { nBitLength: _nBitLength, nByteLength } +} + +type FpField = IField & Required, 'isOdd'>> +/** + * Initializes a finite field over prime. 
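+ * Example (illustrative): Field(BigInt(2) ** BigInt(255) - BigInt(19)) builds the ed25519 base field.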
+ * Major performance optimizations: + * * a) denormalized operations like mulN instead of mul + * * b) same object shape: never add or remove keys + * * c) Object.freeze + * Fragile: always run a benchmark on a change. + * Security note: operations don't check 'isValid' for all elements for performance reasons, + * it is caller responsibility to check this. + * This is low-level code, please make sure you know what you're doing. + * @param ORDER prime positive bigint + * @param bitLen how many bits the field consumes + * @param isLE (def: false) if encoding / decoding should be in little-endian + * @param redef optional faster redefinitions of sqrt and other methods + */ +export function Field( + ORDER: bigint, + bitLen?: number, + isLE = false, + redef: Partial> = {}, +): Readonly { + if (ORDER <= _0n) throw new Error('invalid field: expected ORDER > 0, got ' + ORDER) + const { nBitLength: BITS, nByteLength: BYTES } = nLength(ORDER, bitLen) + if (BYTES > 2048) throw new Error('invalid field: expected ORDER of <= 2048 bytes') + let sqrtP: ReturnType // cached sqrtP + const f: Readonly = Object.freeze({ + ORDER, + isLE, + BITS, + BYTES, + MASK: bitMask(BITS), + ZERO: _0n, + ONE: _1n, + create: (num) => mod(num, ORDER), + isValid: (num) => { + if (typeof num !== 'bigint') + throw new Error('invalid field element: expected bigint, got ' + typeof num) + return _0n <= num && num < ORDER // 0 is valid element, but it's not invertible + }, + is0: (num) => num === _0n, + isOdd: (num) => (num & _1n) === _1n, + neg: (num) => mod(-num, ORDER), + eql: (lhs, rhs) => lhs === rhs, + + sqr: (num) => mod(num * num, ORDER), + add: (lhs, rhs) => mod(lhs + rhs, ORDER), + sub: (lhs, rhs) => mod(lhs - rhs, ORDER), + mul: (lhs, rhs) => mod(lhs * rhs, ORDER), + pow: (num, power) => FpPow(f, num, power), + div: (lhs, rhs) => mod(lhs * invert(rhs, ORDER), ORDER), + + // Same as above, but doesn't normalize + sqrN: (num) => num * num, + addN: (lhs, rhs) => lhs + rhs, + subN: (lhs, rhs) => lhs - rhs, + mulN: (lhs, rhs) => lhs * rhs, + + inv: (num) => invert(num, ORDER), + sqrt: + redef.sqrt || + ((n) => { + if (!sqrtP) sqrtP = FpSqrt(ORDER) + return sqrtP(f, n) + }), + invertBatch: (lst) => FpInvertBatch(f, lst), + // TODO: do we really need constant cmov? + // We don't have const-time bigints anyway, so probably will be not very useful + cmov: (a, b, c) => (c ? b : a), + toBytes: (num) => (isLE ? numberToBytesLE(num, BYTES) : numberToBytesBE(num, BYTES)), + fromBytes: (bytes) => { + if (bytes.length !== BYTES) + throw new Error('Field.fromBytes: expected ' + BYTES + ' bytes, got ' + bytes.length) + return isLE ? bytesToNumberLE(bytes) : bytesToNumberBE(bytes) + }, + } as FpField) + return Object.freeze(f) +} + +export function FpSqrtOdd(Fp: IField, elm: T): T { + if (!Fp.isOdd) throw new Error("Field doesn't have isOdd") + const root = Fp.sqrt(elm) + return Fp.isOdd(root) ? root : Fp.neg(root) +} + +export function FpSqrtEven(Fp: IField, elm: T): T { + if (!Fp.isOdd) throw new Error("Field doesn't have isOdd") + const root = Fp.sqrt(elm) + return Fp.isOdd(root) ? Fp.neg(root) : root +} + +/** + * "Constant-time" private key generation utility. + * Same as mapKeyToField, but accepts less bytes (40 instead of 48 for 32-byte field). + * Which makes it slightly more biased, less secure. 
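+ * Illustrative: for a 32-byte field it accepts 40..1024 input bytes and returns
+ * mod(num, groupOrder - 1) + 1, so the result is never zero.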
+ * @deprecated use `mapKeyToField` instead + */ +export function hashToPrivateScalar( + hash: string | Uint8Array, + groupOrder: bigint, + isLE = false, +): bigint { + hash = ensureBytes('privateHash', hash) + const hashLen = hash.length + const minLen = nLength(groupOrder).nByteLength + 8 + if (minLen < 24 || hashLen < minLen || hashLen > 1024) + throw new Error( + 'hashToPrivateScalar: expected ' + minLen + '-1024 bytes of input, got ' + hashLen, + ) + const num = isLE ? bytesToNumberLE(hash) : bytesToNumberBE(hash) + return mod(num, groupOrder - _1n) + _1n +} + +/** + * Returns total number of bytes consumed by the field element. + * For example, 32 bytes for usual 256-bit weierstrass curve. + * @param fieldOrder number of field elements, usually CURVE.n + * @returns byte length of field + */ +export function getFieldBytesLength(fieldOrder: bigint): number { + if (typeof fieldOrder !== 'bigint') throw new Error('field order must be bigint') + const bitLength = fieldOrder.toString(2).length + return Math.ceil(bitLength / 8) +} + +/** + * Returns minimal amount of bytes that can be safely reduced + * by field order. + * Should be 2^-128 for 128-bit curve such as P256. + * @param fieldOrder number of field elements, usually CURVE.n + * @returns byte length of target hash + */ +export function getMinHashLength(fieldOrder: bigint): number { + const length = getFieldBytesLength(fieldOrder) + return length + Math.ceil(length / 2) +} + +/** + * "Constant-time" private key generation utility. + * Can take (n + n/2) or more bytes of uniform input e.g. from CSPRNG or KDF + * and convert them into private scalar, with the modulo bias being negligible. + * Needs at least 48 bytes of input for 32-byte private key. + * https://research.kudelskisecurity.com/2020/07/28/the-definitive-guide-to-modulo-bias-and-how-to-avoid-it/ + * FIPS 186-5, A.2 https://csrc.nist.gov/publications/detail/fips/186/5/final + * RFC 9380, https://www.rfc-editor.org/rfc/rfc9380#section-5 + * @param hash hash output from SHA3 or a similar function + * @param groupOrder size of subgroup - (e.g. secp256k1.CURVE.n) + * @param isLE interpret hash bytes as LE num + * @returns valid private scalar + */ +export function mapHashToField(key: Uint8Array, fieldOrder: bigint, isLE = false): Uint8Array { + const len = key.length + const fieldLen = getFieldBytesLength(fieldOrder) + const minLen = getMinHashLength(fieldOrder) + // No small numbers: need to understand bias story. No huge numbers: easier to detect JS timings. + if (len < 16 || len < minLen || len > 1024) + throw new Error('expected ' + minLen + '-1024 bytes of input, got ' + len) + const num = isLE ? bytesToNumberLE(key) : bytesToNumberBE(key) + // `mod(x, 11)` can sometimes produce 0. `mod(x, 10) + 1` is the same, but no 0 + const reduced = mod(num, fieldOrder - _1n) + _1n + return isLE ? numberToBytesLE(reduced, fieldLen) : numberToBytesBE(reduced, fieldLen) +} diff --git a/packages/evm/src/precompiles/bls12_381/nbl/src/abstract/montgomery.ts b/packages/evm/src/precompiles/bls12_381/nbl/src/abstract/montgomery.ts new file mode 100644 index 00000000000..122406e3350 --- /dev/null +++ b/packages/evm/src/precompiles/bls12_381/nbl/src/abstract/montgomery.ts @@ -0,0 +1,190 @@ +/** + * Montgomery curve methods. It's not really whole montgomery curve, + * just bunch of very specific methods for X25519 / X448 from + * [RFC 7748](https://www.rfc-editor.org/rfc/rfc7748) + * @module + */ +/*! 
noble-curves - MIT License (c) 2022 Paul Miller (paulmillr.com) */ +import { mod, pow } from './modular.ts' +import { aInRange, bytesToNumberLE, ensureBytes, numberToBytesLE, validateObject } from './utils.ts' + +const _0n = BigInt(0) +const _1n = BigInt(1) +type Hex = string | Uint8Array + +export type CurveType = { + P: bigint // finite field prime + nByteLength: number + adjustScalarBytes?: (bytes: Uint8Array) => Uint8Array + domain?: (data: Uint8Array, ctx: Uint8Array, phflag: boolean) => Uint8Array + a: bigint + montgomeryBits: number + powPminus2?: (x: bigint) => bigint + xyToU?: (x: bigint, y: bigint) => bigint + Gu: bigint + randomBytes?: (bytesLength?: number) => Uint8Array +} + +export type CurveFn = { + scalarMult: (scalar: Hex, u: Hex) => Uint8Array + scalarMultBase: (scalar: Hex) => Uint8Array + getSharedSecret: (privateKeyA: Hex, publicKeyB: Hex) => Uint8Array + getPublicKey: (privateKey: Hex) => Uint8Array + utils: { randomPrivateKey: () => Uint8Array } + GuBytes: Uint8Array +} + +function validateOpts(curve: CurveType) { + validateObject( + curve, + { + a: 'bigint', + }, + { + montgomeryBits: 'isSafeInteger', + nByteLength: 'isSafeInteger', + adjustScalarBytes: 'function', + domain: 'function', + powPminus2: 'function', + Gu: 'bigint', + }, + ) + // Set defaults + return Object.freeze({ ...curve } as const) +} + +// Uses only one coordinate instead of two +export function montgomery(curveDef: CurveType): CurveFn { + const CURVE = validateOpts(curveDef) + const { P } = CURVE + const modP = (n: bigint) => mod(n, P) + const montgomeryBits = CURVE.montgomeryBits + const montgomeryBytes = Math.ceil(montgomeryBits / 8) + const fieldLen = CURVE.nByteLength + const adjustScalarBytes = CURVE.adjustScalarBytes || ((bytes: Uint8Array) => bytes) + const powPminus2 = CURVE.powPminus2 || ((x: bigint) => pow(x, P - BigInt(2), P)) + + // cswap from RFC7748. But it is not from RFC7748! + /* + cswap(swap, x_2, x_3): + dummy = mask(swap) AND (x_2 XOR x_3) + x_2 = x_2 XOR dummy + x_3 = x_3 XOR dummy + Return (x_2, x_3) + Where mask(swap) is the all-1 or all-0 word of the same length as x_2 + and x_3, computed, e.g., as mask(swap) = 0 - swap. + */ + function cswap(swap: bigint, x_2: bigint, x_3: bigint): [bigint, bigint] { + const dummy = modP(swap * (x_2 - x_3)) + x_2 = modP(x_2 - dummy) + x_3 = modP(x_3 + dummy) + return [x_2, x_3] + } + + // x25519 from 4 + // The constant a24 is (486662 - 2) / 4 = 121665 for curve25519/X25519 + const a24 = (CURVE.a - BigInt(2)) / BigInt(4) + /** + * + * @param pointU u coordinate (x) on Montgomery Curve 25519 + * @param scalar by which the point would be multiplied + * @returns new Point on Montgomery curve + */ + function montgomeryLadder(u: bigint, scalar: bigint): bigint { + aInRange('u', u, _0n, P) + aInRange('scalar', scalar, _0n, P) + // Section 5: Implementations MUST accept non-canonical values and process them as + // if they had been reduced modulo the field prime. 
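+    // Illustrative note: each of the montgomeryBits iterations below performs the same conditional
+    // swaps, one differential addition and one doubling, so the sequence of field operations does
+    // not depend on the scalar bits.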
+ const k = scalar + const x_1 = u + let x_2 = _1n + let z_2 = _0n + let x_3 = u + let z_3 = _1n + let swap = _0n + let sw: [bigint, bigint] + for (let t = BigInt(montgomeryBits - 1); t >= _0n; t--) { + const k_t = (k >> t) & _1n + swap ^= k_t + sw = cswap(swap, x_2, x_3) + x_2 = sw[0] + x_3 = sw[1] + sw = cswap(swap, z_2, z_3) + z_2 = sw[0] + z_3 = sw[1] + swap = k_t + + const A = x_2 + z_2 + const AA = modP(A * A) + const B = x_2 - z_2 + const BB = modP(B * B) + const E = AA - BB + const C = x_3 + z_3 + const D = x_3 - z_3 + const DA = modP(D * A) + const CB = modP(C * B) + const dacb = DA + CB + const da_cb = DA - CB + x_3 = modP(dacb * dacb) + z_3 = modP(x_1 * modP(da_cb * da_cb)) + x_2 = modP(AA * BB) + z_2 = modP(E * (AA + modP(a24 * E))) + } + // (x_2, x_3) = cswap(swap, x_2, x_3) + sw = cswap(swap, x_2, x_3) + x_2 = sw[0] + x_3 = sw[1] + // (z_2, z_3) = cswap(swap, z_2, z_3) + sw = cswap(swap, z_2, z_3) + z_2 = sw[0] + z_3 = sw[1] + // z_2^(p - 2) + const z2 = powPminus2(z_2) + // Return x_2 * (z_2^(p - 2)) + return modP(x_2 * z2) + } + + function encodeUCoordinate(u: bigint): Uint8Array { + return numberToBytesLE(modP(u), montgomeryBytes) + } + + function decodeUCoordinate(uEnc: Hex): bigint { + // Section 5: When receiving such an array, implementations of X25519 + // MUST mask the most significant bit in the final byte. + const u = ensureBytes('u coordinate', uEnc, montgomeryBytes) + if (fieldLen === 32) u[31] &= 127 // 0b0111_1111 + return bytesToNumberLE(u) + } + function decodeScalar(n: Hex): bigint { + const bytes = ensureBytes('scalar', n) + const len = bytes.length + if (len !== montgomeryBytes && len !== fieldLen) { + let valid = '' + montgomeryBytes + ' or ' + fieldLen + throw new Error('invalid scalar, expected ' + valid + ' bytes, got ' + len) + } + return bytesToNumberLE(adjustScalarBytes(bytes)) + } + function scalarMult(scalar: Hex, u: Hex): Uint8Array { + const pointU = decodeUCoordinate(u) + const _scalar = decodeScalar(scalar) + const pu = montgomeryLadder(pointU, _scalar) + // The result was not contributory + // https://cr.yp.to/ecdh.html#validate + if (pu === _0n) throw new Error('invalid private or public key received') + return encodeUCoordinate(pu) + } + // Computes public key from private. By doing scalar multiplication of base point. + const GuBytes = encodeUCoordinate(CURVE.Gu) + function scalarMultBase(scalar: Hex): Uint8Array { + return scalarMult(scalar, GuBytes) + } + + return { + scalarMult, + scalarMultBase, + getSharedSecret: (privateKey: Hex, publicKey: Hex) => scalarMult(privateKey, publicKey), + getPublicKey: (privateKey: Hex): Uint8Array => scalarMultBase(privateKey), + utils: { randomPrivateKey: () => CURVE.randomBytes!(CURVE.nByteLength) }, + GuBytes: GuBytes, + } +} diff --git a/packages/evm/src/precompiles/bls12_381/nbl/src/abstract/poseidon.ts b/packages/evm/src/precompiles/bls12_381/nbl/src/abstract/poseidon.ts new file mode 100644 index 00000000000..6edaad90986 --- /dev/null +++ b/packages/evm/src/precompiles/bls12_381/nbl/src/abstract/poseidon.ts @@ -0,0 +1,329 @@ +/** + * Implements [Poseidon](https://www.poseidon-hash.info) ZK-friendly hash. + * + * There are many poseidon variants with different constants. + * We don't provide them: you should construct them manually. + * Check out [micro-starknet](https://github.com/paulmillr/micro-starknet) package for a proper example. + * @module + */ +/*! 
noble-curves - MIT License (c) 2022 Paul Miller (paulmillr.com) */ +import { FpPow, type IField, validateField } from './modular.ts' +import { bitGet } from './utils.ts' + +// Grain LFSR (Linear-Feedback Shift Register): https://eprint.iacr.org/2009/109.pdf +function grainLFSR(state: number[]): () => boolean { + let pos = 0 + if (state.length !== 80) throw new Error('grainLFRS: wrong state length, should be 80 bits') + const getBit = (): boolean => { + const r = (offset: number) => state[(pos + offset) % 80] + const bit = r(62) ^ r(51) ^ r(38) ^ r(23) ^ r(13) ^ r(0) + state[pos] = bit + pos = ++pos % 80 + return !!bit + } + for (let i = 0; i < 160; i++) getBit() + return () => { + // https://en.wikipedia.org/wiki/Shrinking_generator + while (true) { + const b1 = getBit() + const b2 = getBit() + if (!b1) continue + return b2 + } + } +} + +export type PoseidonBasicOpts = { + Fp: IField + t: number // t = rate + capacity + roundsFull: number + roundsPartial: number + isSboxInverse?: boolean +} + +function validateBasicOpts(opts: PoseidonBasicOpts) { + const { Fp, roundsFull } = opts + validateField(Fp) + for (const i of ['t', 'roundsFull', 'roundsPartial'] as const) { + if (typeof opts[i] !== 'number' || !Number.isSafeInteger(opts[i])) + throw new Error('invalid number ' + i) + } + if (opts.isSboxInverse !== undefined && typeof opts.isSboxInverse !== 'boolean') + throw new Error(`Poseidon: invalid param isSboxInverse=${opts.isSboxInverse}`) + if (roundsFull & 1) throw new Error('roundsFull is not even' + roundsFull) +} + +function poseidonGrain(opts: PoseidonBasicOpts) { + validateBasicOpts(opts) + const { Fp } = opts + const state = Array(80).fill(1) + let pos = 0 + const writeBits = (value: bigint, bitCount: number) => { + for (let i = bitCount - 1; i >= 0; i--) state[pos++] = Number(bitGet(value, i)) + } + writeBits(1n, 2) // prime field + writeBits(opts.isSboxInverse ? 
1n : 0n, 4) // b2..b5 + writeBits(BigInt(Fp.BITS), 12) // b6..b17 + writeBits(BigInt(opts.t), 12) // b18..b29 + writeBits(BigInt(opts.roundsFull), 10) // b30..b39 + writeBits(BigInt(opts.roundsPartial), 10) // b40..b49 + + const getBit = grainLFSR(state) + return (count: number, reject: boolean): bigint[] => { + const res: bigint[] = [] + for (let i = 0; i < count; i++) { + while (true) { + let num = 0n + for (let i = 0; i < Fp.BITS; i++) { + num <<= 1n + if (getBit()) num |= 1n + } + if (reject && num >= Fp.ORDER) continue // rejection sampling + res.push(Fp.create(num)) + break + } + } + return res + } +} + +export type PoseidonGrainOpts = PoseidonBasicOpts & { + sboxPower?: number +} + +type PoseidonConstants = { mds: bigint[][]; roundConstants: bigint[][] } + +// NOTE: this is not standard but used often for constant generation for poseidon +// (grain LFRS-like structure) +export function grainGenConstants(opts: PoseidonGrainOpts, skipMDS: number = 0): PoseidonConstants { + const { Fp, t, roundsFull, roundsPartial } = opts + const rounds = roundsFull + roundsPartial + const sample = poseidonGrain(opts) + const roundConstants: bigint[][] = [] + for (let r = 0; r < rounds; r++) roundConstants.push(sample(t, true)) + if (skipMDS > 0) for (let i = 0; i < skipMDS; i++) sample(2 * t, false) + const xs = sample(t, false) + const ys = sample(t, false) + // Construct MDS Matrix M[i][j] = 1 / (xs[i] + ys[j]) + const mds: bigint[][] = [] + for (let i = 0; i < t; i++) { + const row: bigint[] = [] + for (let j = 0; j < t; j++) { + const xy = Fp.add(xs[i], ys[j]) + if (Fp.is0(xy)) + throw new Error(`Error generating MDS matrix: xs[${i}] + ys[${j}] resulted in zero.`) + row.push(xy) + } + mds.push(Fp.invertBatch(row)) + } + + return { roundConstants, mds } +} + +export type PoseidonOpts = PoseidonBasicOpts & + PoseidonConstants & { + sboxPower?: number + reversePartialPowIdx?: boolean // Hack for stark + } + +export function validateOpts(opts: PoseidonOpts): Readonly<{ + rounds: number + sboxFn: (n: bigint) => bigint + roundConstants: bigint[][] + mds: bigint[][] + Fp: IField + t: number + roundsFull: number + roundsPartial: number + sboxPower?: number + reversePartialPowIdx?: boolean // Hack for stark +}> { + validateBasicOpts(opts) + const { Fp, mds, reversePartialPowIdx: rev, roundConstants: rc } = opts + const { roundsFull, roundsPartial, sboxPower, t } = opts + + // MDS is TxT matrix + if (!Array.isArray(mds) || mds.length !== t) throw new Error('Poseidon: invalid MDS matrix') + const _mds = mds.map((mdsRow) => { + if (!Array.isArray(mdsRow) || mdsRow.length !== t) + throw new Error('invalid MDS matrix row: ' + mdsRow) + return mdsRow.map((i) => { + if (typeof i !== 'bigint') throw new Error('invalid MDS matrix bigint: ' + i) + return Fp.create(i) + }) + }) + + if (rev !== undefined && typeof rev !== 'boolean') + throw new Error('invalid param reversePartialPowIdx=' + rev) + + if (roundsFull & 1) throw new Error('roundsFull is not even' + roundsFull) + const rounds = roundsFull + roundsPartial + + if (!Array.isArray(rc) || rc.length !== rounds) + throw new Error('Poseidon: invalid round constants') + const roundConstants = rc.map((rc) => { + if (!Array.isArray(rc) || rc.length !== t) throw new Error('invalid round constants') + return rc.map((i) => { + if (typeof i !== 'bigint' || !Fp.isValid(i)) throw new Error('invalid round constant') + return Fp.create(i) + }) + }) + + if (!sboxPower || ![3, 5, 7, 17].includes(sboxPower)) throw new Error('invalid sboxPower') + const _sboxPower = 
BigInt(sboxPower) + let sboxFn = (n: bigint) => FpPow(Fp, n, _sboxPower) + // Unwrapped sbox power for common cases (195->142μs) + if (sboxPower === 3) sboxFn = (n: bigint) => Fp.mul(Fp.sqrN(n), n) + else if (sboxPower === 5) sboxFn = (n: bigint) => Fp.mul(Fp.sqrN(Fp.sqrN(n)), n) + + return Object.freeze({ ...opts, rounds, sboxFn, roundConstants, mds: _mds }) +} + +export function splitConstants(rc: bigint[], t: number): bigint[][] { + if (typeof t !== 'number') throw new Error('poseidonSplitConstants: invalid t') + if (!Array.isArray(rc) || rc.length % t) throw new Error('poseidonSplitConstants: invalid rc') + const res = [] + let tmp = [] + for (let i = 0; i < rc.length; i++) { + tmp.push(rc[i]) + if (tmp.length === t) { + res.push(tmp) + tmp = [] + } + } + return res +} + +/** Poseidon NTT-friendly hash. */ +export function poseidon(opts: PoseidonOpts): { + (values: bigint[]): bigint[] + // For verification in tests + roundConstants: bigint[][] +} { + const _opts = validateOpts(opts) + const { Fp, mds, roundConstants, rounds: totalRounds, roundsPartial, sboxFn, t } = _opts + const halfRoundsFull = _opts.roundsFull / 2 + const partialIdx = _opts.reversePartialPowIdx ? t - 1 : 0 + const poseidonRound = (values: bigint[], isFull: boolean, idx: number) => { + values = values.map((i, j) => Fp.add(i, roundConstants[idx][j])) + + if (isFull) values = values.map((i) => sboxFn(i)) + else values[partialIdx] = sboxFn(values[partialIdx]) + // Matrix multiplication + values = mds.map((i) => i.reduce((acc, i, j) => Fp.add(acc, Fp.mulN(i, values[j])), Fp.ZERO)) + return values + } + const poseidonHash = function poseidonHash(values: bigint[]) { + if (!Array.isArray(values) || values.length !== t) + throw new Error('invalid values, expected array of bigints with length ' + t) + values = values.map((i) => { + if (typeof i !== 'bigint') throw new Error('invalid bigint=' + i) + return Fp.create(i) + }) + let lastRound = 0 + // Apply r_f/2 full rounds. + for (let i = 0; i < halfRoundsFull; i++) values = poseidonRound(values, true, lastRound++) + // Apply r_p partial rounds. + for (let i = 0; i < roundsPartial; i++) values = poseidonRound(values, false, lastRound++) + // Apply r_f/2 full rounds. 
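+    // (A full round applies the sbox to all t elements; the partial rounds above touch only one.)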
+ for (let i = 0; i < halfRoundsFull; i++) values = poseidonRound(values, true, lastRound++) + + if (lastRound !== totalRounds) throw new Error('invalid number of rounds') + return values + } + // For verification in tests + poseidonHash.roundConstants = roundConstants + return poseidonHash +} + +export class PoseidonSponge { + private Fp: IField + readonly rate: number + readonly capacity: number + readonly hash: ReturnType + private state: bigint[] // [...capacity, ...rate] + private pos = 0 + private isAbsorbing = true + + constructor( + Fp: IField, + rate: number, + capacity: number, + hash: ReturnType, + ) { + this.Fp = Fp + this.hash = hash + this.rate = rate + this.capacity = capacity + this.state = new Array(rate + capacity) + this.clean() + } + private process(): void { + this.state = this.hash(this.state) + } + absorb(input: bigint[]): void { + for (const i of input) + if (typeof i !== 'bigint' || !this.Fp.isValid(i)) throw new Error('invalid input: ' + i) + for (let i = 0; i < input.length; ) { + if (!this.isAbsorbing || this.pos === this.rate) { + this.process() + this.pos = 0 + this.isAbsorbing = true + } + const chunk = Math.min(this.rate - this.pos, input.length - i) + for (let j = 0; j < chunk; j++) { + const idx = this.capacity + this.pos++ + this.state[idx] = this.Fp.add(this.state[idx], input[i++]) + } + } + } + squeeze(count: number): bigint[] { + const res: bigint[] = [] + while (res.length < count) { + if (this.isAbsorbing || this.pos === this.rate) { + this.process() + this.pos = 0 + this.isAbsorbing = false + } + const chunk = Math.min(this.rate - this.pos, count - res.length) + for (let i = 0; i < chunk; i++) res.push(this.state[this.capacity + this.pos++]) + } + return res + } + clean(): void { + this.state.fill(this.Fp.ZERO) + this.isAbsorbing = true + this.pos = 0 + } + clone(): PoseidonSponge { + const c = new PoseidonSponge(this.Fp, this.rate, this.capacity, this.hash) + c.pos = this.pos + c.state = [...this.state] + return c + } +} + +export type PoseidonSpongeOpts = Omit & { + rate: number + capacity: number +} + +/** + * The method is not defined in spec, but nevertheless used often. + * Check carefully for compatibility: there are many edge cases, like absorbing an empty array. + * We cross-test against: + * - https://github.com/ProvableHQ/snarkVM/tree/staging/algorithms + * - https://github.com/arkworks-rs/crypto-primitives/tree/main + */ +export function poseidonSponge(opts: PoseidonSpongeOpts): () => PoseidonSponge { + for (const i of ['rate', 'capacity'] as const) { + if (typeof opts[i] !== 'number' || !Number.isSafeInteger(opts[i])) + throw new Error('invalid number ' + i) + } + const { rate, capacity } = opts + const t = opts.rate + opts.capacity + // Re-use hash instance between multiple instances + const hash = poseidon({ ...opts, t }) + const { Fp } = opts + return () => new PoseidonSponge(Fp, rate, capacity, hash) +} diff --git a/packages/evm/src/precompiles/bls12_381/nbl/src/abstract/tower.ts b/packages/evm/src/precompiles/bls12_381/nbl/src/abstract/tower.ts new file mode 100644 index 00000000000..4135dc08816 --- /dev/null +++ b/packages/evm/src/precompiles/bls12_381/nbl/src/abstract/tower.ts @@ -0,0 +1,664 @@ +/** + * Towered extension fields. + * Rather than implementing a massive 12th-degree extension directly, it is more efficient + * to build it up from smaller extensions: a tower of extensions. 
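+ * Illustrative, for BLS12-381: Fp2 = Fp[u]/(u² + 1), Fp6 = Fp2[v]/(v³ - (u + 1)), Fp12 = Fp6[w]/(w² - v).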
+ * + * For BLS12-381, the Fp12 field is implemented as a quadratic (degree two) extension, + * on top of a cubic (degree three) extension, on top of a quadratic extension of Fp. + * + * For more info: "Pairings for beginners" by Costello, section 7.3. + * @module + */ +/*! noble-curves - MIT License (c) 2022 Paul Miller (paulmillr.com) */ +import * as mod from './modular.ts' +import { bitLen, bitMask, concatBytes, notImplemented } from './utils.ts' +import type { ProjConstructor, ProjPointType } from './weierstrass.ts' + +// Be friendly to bad ECMAScript parsers by not using bigint literals +// prettier-ignore +const _0n = BigInt(0), + _1n = BigInt(1), + _2n = BigInt(2), + _3n = BigInt(3) + +// Fp₂ over complex plane +export type BigintTuple = [bigint, bigint] +export type Fp = bigint +// Finite extension field over irreducible polynominal. +// Fp(u) / (u² - β) where β = -1 +export type Fp2 = { c0: bigint; c1: bigint } +export type BigintSix = [bigint, bigint, bigint, bigint, bigint, bigint] +export type Fp6 = { c0: Fp2; c1: Fp2; c2: Fp2 } +export type Fp12 = { c0: Fp6; c1: Fp6 } // Fp₁₂ = Fp₆² => Fp₂³, Fp₆(w) / (w² - γ) where γ = v +// prettier-ignore +export type BigintTwelve = [ + bigint, + bigint, + bigint, + bigint, + bigint, + bigint, + bigint, + bigint, + bigint, + bigint, + bigint, + bigint, +] + +export type Fp2Bls = mod.IField & { + reim: (num: Fp2) => { re: Fp; im: Fp } + mulByB: (num: Fp2) => Fp2 + frobeniusMap(num: Fp2, power: number): Fp2 + fromBigTuple(num: [bigint, bigint]): Fp2 +} + +export type Fp12Bls = mod.IField & { + frobeniusMap(num: Fp12, power: number): Fp12 + mul014(num: Fp12, o0: Fp2, o1: Fp2, o4: Fp2): Fp12 + mul034(num: Fp12, o0: Fp2, o3: Fp2, o4: Fp2): Fp12 + conjugate(num: Fp12): Fp12 + finalExponentiate(num: Fp12): Fp12 + fromBigTwelve(num: BigintTwelve): Fp12 +} + +function calcFrobeniusCoefficients( + Fp: mod.IField, + nonResidue: T, + modulus: bigint, + degree: number, + num: number = 1, + divisor?: number, +) { + const _divisor = BigInt(divisor === undefined ? degree : divisor) + const towerModulus: any = modulus ** BigInt(degree) + const res: T[][] = [] + for (let i = 0; i < num; i++) { + const a = BigInt(i + 1) + const powers: T[] = [] + for (let j = 0, qPower = _1n; j < degree; j++) { + const power = ((a * qPower - a) / _divisor) % towerModulus + powers.push(Fp.pow(nonResidue, power)) + qPower *= modulus + } + res.push(powers) + } + return res +} + +// This works same at least for bls12-381, bn254 and bls12-377 +export function psiFrobenius( + Fp: mod.IField, + Fp2: Fp2Bls, + base: Fp2, +): { + psi: (x: Fp2, y: Fp2) => [Fp2, Fp2] + psi2: (x: Fp2, y: Fp2) => [Fp2, Fp2] + G2psi: (c: ProjConstructor, P: ProjPointType) => ProjPointType + G2psi2: (c: ProjConstructor, P: ProjPointType) => ProjPointType + PSI_X: Fp2 + PSI_Y: Fp2 + PSI2_X: Fp2 + PSI2_Y: Fp2 +} { + // Ψ endomorphism + const PSI_X = Fp2.pow(base, (Fp.ORDER - _1n) / _3n) // u^((p-1)/3) + const PSI_Y = Fp2.pow(base, (Fp.ORDER - _1n) / _2n) // u^((p-1)/2) + function psi(x: Fp2, y: Fp2): [Fp2, Fp2] { + // This x10 faster than previous version in bls12-381 + const x2 = Fp2.mul(Fp2.frobeniusMap(x, 1), PSI_X) + const y2 = Fp2.mul(Fp2.frobeniusMap(y, 1), PSI_Y) + return [x2, y2] + } + // Ψ²(P) endomorphism (psi2(x) = psi(psi(x))) + const PSI2_X = Fp2.pow(base, (Fp.ORDER ** _2n - _1n) / _3n) // u^((p^2 - 1)/3) + // This equals -1, which causes y to be Fp2.neg(y). + // But not sure if there are case when this is not true? 
+ const PSI2_Y = Fp2.pow(base, (Fp.ORDER ** _2n - _1n) / _2n) // u^((p^2 - 1)/3) + if (!Fp2.eql(PSI2_Y, Fp2.neg(Fp2.ONE))) throw new Error('psiFrobenius: PSI2_Y!==-1') + function psi2(x: Fp2, y: Fp2): [Fp2, Fp2] { + return [Fp2.mul(x, PSI2_X), Fp2.neg(y)] + } + // Map points + const mapAffine = + (fn: (x: T, y: T) => [T, T]) => + (c: ProjConstructor, P: ProjPointType) => { + const affine = P.toAffine() + const p = fn(affine.x, affine.y) + return c.fromAffine({ x: p[0], y: p[1] }) + } + const G2psi = mapAffine(psi) + const G2psi2 = mapAffine(psi2) + return { psi, psi2, G2psi, G2psi2, PSI_X, PSI_Y, PSI2_X, PSI2_Y } +} + +export type Tower12Opts = { + ORDER: bigint + NONRESIDUE?: Fp + // Fp2 + FP2_NONRESIDUE: BigintTuple + Fp2sqrt?: (num: Fp2) => Fp2 + Fp2mulByB: (num: Fp2) => Fp2 + // Fp12 + Fp12cyclotomicSquare: (num: Fp12) => Fp12 + Fp12cyclotomicExp: (num: Fp12, n: bigint) => Fp12 + Fp12finalExponentiate: (num: Fp12) => Fp12 +} + +export function tower12(opts: Tower12Opts): { + Fp: Readonly & Required, 'isOdd'>>> + Fp2: mod.IField & { + NONRESIDUE: Fp2 + fromBigTuple: (tuple: BigintTuple | bigint[]) => Fp2 + reim: (num: Fp2) => { re: bigint; im: bigint } + mulByNonresidue: (num: Fp2) => Fp2 + mulByB: (num: Fp2) => Fp2 + frobeniusMap(num: Fp2, power: number): Fp2 + } + Fp6: mod.IField & { + fromBigSix: (tuple: BigintSix) => Fp6 + mulByNonresidue: (num: Fp6) => Fp6 + frobeniusMap(num: Fp6, power: number): Fp6 + mul1(num: Fp6, b1: Fp2): Fp6 + mul01(num: Fp6, b0: Fp2, b1: Fp2): Fp6 + mulByFp2(lhs: Fp6, rhs: Fp2): Fp6 + } + Fp4Square: (a: Fp2, b: Fp2) => { first: Fp2; second: Fp2 } + Fp12: mod.IField & { + fromBigTwelve: (t: BigintTwelve) => Fp12 + frobeniusMap(num: Fp12, power: number): Fp12 + mul014(num: Fp12, o0: Fp2, o1: Fp2, o4: Fp2): Fp12 + mul034(num: Fp12, o0: Fp2, o3: Fp2, o4: Fp2): Fp12 + mulByFp2(lhs: Fp12, rhs: Fp2): Fp12 + conjugate(num: Fp12): Fp12 + finalExponentiate(num: Fp12): Fp12 + _cyclotomicSquare(num: Fp12): Fp12 + _cyclotomicExp(num: Fp12, n: bigint): Fp12 + } +} { + const { ORDER } = opts + // Fp + const Fp = mod.Field(ORDER) + const FpNONRESIDUE = Fp.create(opts.NONRESIDUE || BigInt(-1)) + const FpLegendre = mod.FpLegendre(ORDER) + const Fpdiv2 = Fp.div(Fp.ONE, _2n) // 1/2 + + // Fp2 + const FP2_FROBENIUS_COEFFICIENTS = calcFrobeniusCoefficients(Fp, FpNONRESIDUE, Fp.ORDER, 2)[0] + const Fp2Add = ({ c0, c1 }: Fp2, { c0: r0, c1: r1 }: Fp2) => ({ + c0: Fp.add(c0, r0), + c1: Fp.add(c1, r1), + }) + const Fp2Subtract = ({ c0, c1 }: Fp2, { c0: r0, c1: r1 }: Fp2) => ({ + c0: Fp.sub(c0, r0), + c1: Fp.sub(c1, r1), + }) + const Fp2Multiply = ({ c0, c1 }: Fp2, rhs: Fp2) => { + if (typeof rhs === 'bigint') return { c0: Fp.mul(c0, rhs), c1: Fp.mul(c1, rhs) } + // (a+bi)(c+di) = (ac−bd) + (ad+bc)i + const { c0: r0, c1: r1 } = rhs + let t1 = Fp.mul(c0, r0) // c0 * o0 + let t2 = Fp.mul(c1, r1) // c1 * o1 + // (T1 - T2) + ((c0 + c1) * (r0 + r1) - (T1 + T2))*i + const o0 = Fp.sub(t1, t2) + const o1 = Fp.sub(Fp.mul(Fp.add(c0, c1), Fp.add(r0, r1)), Fp.add(t1, t2)) + return { c0: o0, c1: o1 } + } + const Fp2Square = ({ c0, c1 }: Fp2) => { + const a = Fp.add(c0, c1) + const b = Fp.sub(c0, c1) + const c = Fp.add(c0, c0) + return { c0: Fp.mul(a, b), c1: Fp.mul(c, c1) } + } + type Fp2Utils = { + NONRESIDUE: Fp2 + fromBigTuple: (tuple: BigintTuple | bigint[]) => Fp2 + reim: (num: Fp2) => { re: bigint; im: bigint } + mulByNonresidue: (num: Fp2) => Fp2 + mulByB: (num: Fp2) => Fp2 + frobeniusMap(num: Fp2, power: number): Fp2 + } + const Fp2fromBigTuple = (tuple: BigintTuple | bigint[]) => { + if 
(tuple.length !== 2) throw new Error('invalid tuple') + const fps = tuple.map((n) => Fp.create(n)) as [Fp, Fp] + return { c0: fps[0], c1: fps[1] } + } + + const FP2_ORDER = ORDER * ORDER + const Fp2Nonresidue = Fp2fromBigTuple(opts.FP2_NONRESIDUE) + const Fp2: mod.IField & Fp2Utils = { + ORDER: FP2_ORDER, + isLE: Fp.isLE, + NONRESIDUE: Fp2Nonresidue, + BITS: bitLen(FP2_ORDER), + BYTES: Math.ceil(bitLen(FP2_ORDER) / 8), + MASK: bitMask(bitLen(FP2_ORDER)), + ZERO: { c0: Fp.ZERO, c1: Fp.ZERO }, + ONE: { c0: Fp.ONE, c1: Fp.ZERO }, + create: (num) => num, + isValid: ({ c0, c1 }) => typeof c0 === 'bigint' && typeof c1 === 'bigint', + is0: ({ c0, c1 }) => Fp.is0(c0) && Fp.is0(c1), + eql: ({ c0, c1 }: Fp2, { c0: r0, c1: r1 }: Fp2) => Fp.eql(c0, r0) && Fp.eql(c1, r1), + neg: ({ c0, c1 }) => ({ c0: Fp.neg(c0), c1: Fp.neg(c1) }), + pow: (num, power) => mod.FpPow(Fp2, num, power), + invertBatch: (nums) => mod.FpInvertBatch(Fp2, nums), + // Normalized + add: Fp2Add, + sub: Fp2Subtract, + mul: Fp2Multiply, + sqr: Fp2Square, + // NonNormalized stuff + addN: Fp2Add, + subN: Fp2Subtract, + mulN: Fp2Multiply, + sqrN: Fp2Square, + // Why inversion for bigint inside Fp instead of Fp2? it is even used in that context? + div: (lhs, rhs) => + Fp2.mul(lhs, typeof rhs === 'bigint' ? Fp.inv(Fp.create(rhs)) : Fp2.inv(rhs)), + inv: ({ c0: a, c1: b }) => { + // We wish to find the multiplicative inverse of a nonzero + // element a + bu in Fp2. We leverage an identity + // + // (a + bu)(a - bu) = a² + b² + // + // which holds because u² = -1. This can be rewritten as + // + // (a + bu)(a - bu)/(a² + b²) = 1 + // + // because a² + b² = 0 has no nonzero solutions for (a, b). + // This gives that (a - bu)/(a² + b²) is the inverse + // of (a + bu). Importantly, this can be computing using + // only a single inversion in Fp. 
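+      // Worked example (illustrative): with u² = -1, (3 + 4u)⁻¹ = (3 - 4u) / (3² + 4²) = (3 - 4u) / 25.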
+ const factor = Fp.inv(Fp.create(a * a + b * b)) + return { c0: Fp.mul(factor, Fp.create(a)), c1: Fp.mul(factor, Fp.create(-b)) } + }, + sqrt: (num) => { + if (opts.Fp2sqrt) return opts.Fp2sqrt(num) + // This is generic for all quadratic extensions (Fp2) + const { c0, c1 } = num + if (Fp.is0(c1)) { + // if c0 is quadratic residue + if (Fp.eql(FpLegendre(Fp, c0), Fp.ONE)) return Fp2.create({ c0: Fp.sqrt(c0), c1: Fp.ZERO }) + else return Fp2.create({ c0: Fp.ZERO, c1: Fp.sqrt(Fp.div(c0, FpNONRESIDUE)) }) + } + const a = Fp.sqrt(Fp.sub(Fp.sqr(c0), Fp.mul(Fp.sqr(c1), FpNONRESIDUE))) + let d = Fp.mul(Fp.add(a, c0), Fpdiv2) + const legendre = FpLegendre(Fp, d) + // -1, Quadratic non residue + if (!Fp.is0(legendre) && !Fp.eql(legendre, Fp.ONE)) d = Fp.sub(d, a) + const a0 = Fp.sqrt(d) + const candidateSqrt = Fp2.create({ c0: a0, c1: Fp.div(Fp.mul(c1, Fpdiv2), a0) }) + if (!Fp2.eql(Fp2.sqr(candidateSqrt), num)) throw new Error('Cannot find square root') + // Normalize root: at this point candidateSqrt ** 2 = num, but also -candidateSqrt ** 2 = num + const x1 = candidateSqrt + const x2 = Fp2.neg(x1) + const { re: re1, im: im1 } = Fp2.reim(x1) + const { re: re2, im: im2 } = Fp2.reim(x2) + if (im1 > im2 || (im1 === im2 && re1 > re2)) return x1 + return x2 + }, + // Same as sgn0_m_eq_2 in RFC 9380 + isOdd: (x: Fp2) => { + const { re: x0, im: x1 } = Fp2.reim(x) + const sign_0 = x0 % _2n + const zero_0 = x0 === _0n + const sign_1 = x1 % _2n + return BigInt(sign_0 || (zero_0 && sign_1)) == _1n + }, + // Bytes util + fromBytes(b: Uint8Array): Fp2 { + if (b.length !== Fp2.BYTES) throw new Error('fromBytes invalid length=' + b.length) + return { c0: Fp.fromBytes(b.subarray(0, Fp.BYTES)), c1: Fp.fromBytes(b.subarray(Fp.BYTES)) } + }, + toBytes: ({ c0, c1 }) => concatBytes(Fp.toBytes(c0), Fp.toBytes(c1)), + cmov: ({ c0, c1 }, { c0: r0, c1: r1 }, c) => ({ + c0: Fp.cmov(c0, r0, c), + c1: Fp.cmov(c1, r1, c), + }), + reim: ({ c0, c1 }) => ({ re: c0, im: c1 }), + // multiply by u + 1 + mulByNonresidue: ({ c0, c1 }) => Fp2.mul({ c0, c1 }, Fp2Nonresidue), + mulByB: opts.Fp2mulByB, + fromBigTuple: Fp2fromBigTuple, + frobeniusMap: ({ c0, c1 }, power: number): Fp2 => ({ + c0, + c1: Fp.mul(c1, FP2_FROBENIUS_COEFFICIENTS[power % 2]), + }), + } + // Fp6 + const Fp6Add = ({ c0, c1, c2 }: Fp6, { c0: r0, c1: r1, c2: r2 }: Fp6) => ({ + c0: Fp2.add(c0, r0), + c1: Fp2.add(c1, r1), + c2: Fp2.add(c2, r2), + }) + const Fp6Subtract = ({ c0, c1, c2 }: Fp6, { c0: r0, c1: r1, c2: r2 }: Fp6) => ({ + c0: Fp2.sub(c0, r0), + c1: Fp2.sub(c1, r1), + c2: Fp2.sub(c2, r2), + }) + const Fp6Multiply = ({ c0, c1, c2 }: Fp6, rhs: Fp6 | bigint) => { + if (typeof rhs === 'bigint') { + return { + c0: Fp2.mul(c0, rhs), + c1: Fp2.mul(c1, rhs), + c2: Fp2.mul(c2, rhs), + } + } + const { c0: r0, c1: r1, c2: r2 } = rhs + const t0 = Fp2.mul(c0, r0) // c0 * o0 + const t1 = Fp2.mul(c1, r1) // c1 * o1 + const t2 = Fp2.mul(c2, r2) // c2 * o2 + return { + // t0 + (c1 + c2) * (r1 * r2) - (T1 + T2) * (u + 1) + c0: Fp2.add( + t0, + Fp2.mulByNonresidue(Fp2.sub(Fp2.mul(Fp2.add(c1, c2), Fp2.add(r1, r2)), Fp2.add(t1, t2))), + ), + // (c0 + c1) * (r0 + r1) - (T0 + T1) + T2 * (u + 1) + c1: Fp2.add( + Fp2.sub(Fp2.mul(Fp2.add(c0, c1), Fp2.add(r0, r1)), Fp2.add(t0, t1)), + Fp2.mulByNonresidue(t2), + ), + // T1 + (c0 + c2) * (r0 + r2) - T0 + T2 + c2: Fp2.sub(Fp2.add(t1, Fp2.mul(Fp2.add(c0, c2), Fp2.add(r0, r2))), Fp2.add(t0, t2)), + } + } + const Fp6Square = ({ c0, c1, c2 }: Fp6) => { + let t0 = Fp2.sqr(c0) // c0² + let t1 = Fp2.mul(Fp2.mul(c0, c1), _2n) // 2 * c0 * c1 + let 
t3 = Fp2.mul(Fp2.mul(c1, c2), _2n) // 2 * c1 * c2 + let t4 = Fp2.sqr(c2) // c2² + return { + c0: Fp2.add(Fp2.mulByNonresidue(t3), t0), // T3 * (u + 1) + T0 + c1: Fp2.add(Fp2.mulByNonresidue(t4), t1), // T4 * (u + 1) + T1 + // T1 + (c0 - c1 + c2)² + T3 - T0 - T4 + c2: Fp2.sub(Fp2.sub(Fp2.add(Fp2.add(t1, Fp2.sqr(Fp2.add(Fp2.sub(c0, c1), c2))), t3), t0), t4), + } + } + type Fp6Utils = { + fromBigSix: (tuple: BigintSix) => Fp6 + mulByNonresidue: (num: Fp6) => Fp6 + frobeniusMap(num: Fp6, power: number): Fp6 + mul1(num: Fp6, b1: Fp2): Fp6 + mul01(num: Fp6, b0: Fp2, b1: Fp2): Fp6 + mulByFp2(lhs: Fp6, rhs: Fp2): Fp6 + } + + const [FP6_FROBENIUS_COEFFICIENTS_1, FP6_FROBENIUS_COEFFICIENTS_2] = calcFrobeniusCoefficients( + Fp2, + Fp2Nonresidue, + Fp.ORDER, + 6, + 2, + 3, + ) + + const Fp6: mod.IField & Fp6Utils = { + ORDER: Fp2.ORDER, // TODO: unused, but need to verify + isLE: Fp2.isLE, + BITS: 3 * Fp2.BITS, + BYTES: 3 * Fp2.BYTES, + MASK: bitMask(3 * Fp2.BITS), + ZERO: { c0: Fp2.ZERO, c1: Fp2.ZERO, c2: Fp2.ZERO }, + ONE: { c0: Fp2.ONE, c1: Fp2.ZERO, c2: Fp2.ZERO }, + create: (num) => num, + isValid: ({ c0, c1, c2 }) => Fp2.isValid(c0) && Fp2.isValid(c1) && Fp2.isValid(c2), + is0: ({ c0, c1, c2 }) => Fp2.is0(c0) && Fp2.is0(c1) && Fp2.is0(c2), + neg: ({ c0, c1, c2 }) => ({ c0: Fp2.neg(c0), c1: Fp2.neg(c1), c2: Fp2.neg(c2) }), + eql: ({ c0, c1, c2 }, { c0: r0, c1: r1, c2: r2 }) => + Fp2.eql(c0, r0) && Fp2.eql(c1, r1) && Fp2.eql(c2, r2), + sqrt: notImplemented, + // Do we need division by bigint at all? Should be done via order: + div: (lhs, rhs) => + Fp6.mul(lhs, typeof rhs === 'bigint' ? Fp.inv(Fp.create(rhs)) : Fp6.inv(rhs)), + pow: (num, power) => mod.FpPow(Fp6, num, power), + invertBatch: (nums) => mod.FpInvertBatch(Fp6, nums), + // Normalized + add: Fp6Add, + sub: Fp6Subtract, + mul: Fp6Multiply, + sqr: Fp6Square, + // NonNormalized stuff + addN: Fp6Add, + subN: Fp6Subtract, + mulN: Fp6Multiply, + sqrN: Fp6Square, + + inv: ({ c0, c1, c2 }) => { + let t0 = Fp2.sub(Fp2.sqr(c0), Fp2.mulByNonresidue(Fp2.mul(c2, c1))) // c0² - c2 * c1 * (u + 1) + let t1 = Fp2.sub(Fp2.mulByNonresidue(Fp2.sqr(c2)), Fp2.mul(c0, c1)) // c2² * (u + 1) - c0 * c1 + let t2 = Fp2.sub(Fp2.sqr(c1), Fp2.mul(c0, c2)) // c1² - c0 * c2 + // 1/(((c2 * T1 + c1 * T2) * v) + c0 * T0) + let t4 = Fp2.inv( + Fp2.add(Fp2.mulByNonresidue(Fp2.add(Fp2.mul(c2, t1), Fp2.mul(c1, t2))), Fp2.mul(c0, t0)), + ) + return { c0: Fp2.mul(t4, t0), c1: Fp2.mul(t4, t1), c2: Fp2.mul(t4, t2) } + }, + // Bytes utils + fromBytes: (b: Uint8Array): Fp6 => { + if (b.length !== Fp6.BYTES) throw new Error('fromBytes invalid length=' + b.length) + return { + c0: Fp2.fromBytes(b.subarray(0, Fp2.BYTES)), + c1: Fp2.fromBytes(b.subarray(Fp2.BYTES, 2 * Fp2.BYTES)), + c2: Fp2.fromBytes(b.subarray(2 * Fp2.BYTES)), + } + }, + toBytes: ({ c0, c1, c2 }): Uint8Array => + concatBytes(Fp2.toBytes(c0), Fp2.toBytes(c1), Fp2.toBytes(c2)), + cmov: ({ c0, c1, c2 }: Fp6, { c0: r0, c1: r1, c2: r2 }: Fp6, c) => ({ + c0: Fp2.cmov(c0, r0, c), + c1: Fp2.cmov(c1, r1, c), + c2: Fp2.cmov(c2, r2, c), + }), + fromBigSix: (t: BigintSix): Fp6 => { + if (!Array.isArray(t) || t.length !== 6) throw new Error('invalid Fp6 usage') + return { + c0: Fp2.fromBigTuple(t.slice(0, 2)), + c1: Fp2.fromBigTuple(t.slice(2, 4)), + c2: Fp2.fromBigTuple(t.slice(4, 6)), + } + }, + frobeniusMap: ({ c0, c1, c2 }, power: number) => ({ + c0: Fp2.frobeniusMap(c0, power), + c1: Fp2.mul(Fp2.frobeniusMap(c1, power), FP6_FROBENIUS_COEFFICIENTS_1[power % 6]), + c2: Fp2.mul(Fp2.frobeniusMap(c2, power), 
FP6_FROBENIUS_COEFFICIENTS_2[power % 6]), + }), + mulByFp2: ({ c0, c1, c2 }, rhs: Fp2): Fp6 => ({ + c0: Fp2.mul(c0, rhs), + c1: Fp2.mul(c1, rhs), + c2: Fp2.mul(c2, rhs), + }), + mulByNonresidue: ({ c0, c1, c2 }) => ({ c0: Fp2.mulByNonresidue(c2), c1: c0, c2: c1 }), + // Sparse multiplication + mul1: ({ c0, c1, c2 }, b1: Fp2): Fp6 => ({ + c0: Fp2.mulByNonresidue(Fp2.mul(c2, b1)), + c1: Fp2.mul(c0, b1), + c2: Fp2.mul(c1, b1), + }), + // Sparse multiplication + mul01({ c0, c1, c2 }, b0: Fp2, b1: Fp2): Fp6 { + let t0 = Fp2.mul(c0, b0) // c0 * b0 + let t1 = Fp2.mul(c1, b1) // c1 * b1 + return { + // ((c1 + c2) * b1 - T1) * (u + 1) + T0 + c0: Fp2.add(Fp2.mulByNonresidue(Fp2.sub(Fp2.mul(Fp2.add(c1, c2), b1), t1)), t0), + // (b0 + b1) * (c0 + c1) - T0 - T1 + c1: Fp2.sub(Fp2.sub(Fp2.mul(Fp2.add(b0, b1), Fp2.add(c0, c1)), t0), t1), + // (c0 + c2) * b0 - T0 + T1 + c2: Fp2.add(Fp2.sub(Fp2.mul(Fp2.add(c0, c2), b0), t0), t1), + } + }, + } + + // Fp12 + const FP12_FROBENIUS_COEFFICIENTS = calcFrobeniusCoefficients( + Fp2, + Fp2Nonresidue, + Fp.ORDER, + 12, + 1, + 6, + )[0] + + const Fp12Add = ({ c0, c1 }: Fp12, { c0: r0, c1: r1 }: Fp12) => ({ + c0: Fp6.add(c0, r0), + c1: Fp6.add(c1, r1), + }) + const Fp12Subtract = ({ c0, c1 }: Fp12, { c0: r0, c1: r1 }: Fp12) => ({ + c0: Fp6.sub(c0, r0), + c1: Fp6.sub(c1, r1), + }) + const Fp12Multiply = ({ c0, c1 }: Fp12, rhs: Fp12 | bigint) => { + if (typeof rhs === 'bigint') return { c0: Fp6.mul(c0, rhs), c1: Fp6.mul(c1, rhs) } + let { c0: r0, c1: r1 } = rhs + let t1 = Fp6.mul(c0, r0) // c0 * r0 + let t2 = Fp6.mul(c1, r1) // c1 * r1 + return { + c0: Fp6.add(t1, Fp6.mulByNonresidue(t2)), // T1 + T2 * v + // (c0 + c1) * (r0 + r1) - (T1 + T2) + c1: Fp6.sub(Fp6.mul(Fp6.add(c0, c1), Fp6.add(r0, r1)), Fp6.add(t1, t2)), + } + } + const Fp12Square = ({ c0, c1 }: Fp12) => { + let ab = Fp6.mul(c0, c1) // c0 * c1 + return { + // (c1 * v + c0) * (c0 + c1) - AB - AB * v + c0: Fp6.sub( + Fp6.sub(Fp6.mul(Fp6.add(Fp6.mulByNonresidue(c1), c0), Fp6.add(c0, c1)), ab), + Fp6.mulByNonresidue(ab), + ), + c1: Fp6.add(ab, ab), + } // AB + AB + } + function Fp4Square(a: Fp2, b: Fp2): { first: Fp2; second: Fp2 } { + const a2 = Fp2.sqr(a) + const b2 = Fp2.sqr(b) + return { + first: Fp2.add(Fp2.mulByNonresidue(b2), a2), // b² * Nonresidue + a² + second: Fp2.sub(Fp2.sub(Fp2.sqr(Fp2.add(a, b)), a2), b2), // (a + b)² - a² - b² + } + } + type Fp12Utils = { + fromBigTwelve: (t: BigintTwelve) => Fp12 + frobeniusMap(num: Fp12, power: number): Fp12 + mul014(num: Fp12, o0: Fp2, o1: Fp2, o4: Fp2): Fp12 + mul034(num: Fp12, o0: Fp2, o3: Fp2, o4: Fp2): Fp12 + mulByFp2(lhs: Fp12, rhs: Fp2): Fp12 + conjugate(num: Fp12): Fp12 + finalExponentiate(num: Fp12): Fp12 + _cyclotomicSquare(num: Fp12): Fp12 + _cyclotomicExp(num: Fp12, n: bigint): Fp12 + } + + const Fp12: mod.IField & Fp12Utils = { + ORDER: Fp2.ORDER, // TODO: unused, but need to verify + isLE: Fp6.isLE, + BITS: 2 * Fp6.BITS, + BYTES: 2 * Fp6.BYTES, + MASK: bitMask(2 * Fp6.BITS), + ZERO: { c0: Fp6.ZERO, c1: Fp6.ZERO }, + ONE: { c0: Fp6.ONE, c1: Fp6.ZERO }, + create: (num) => num, + isValid: ({ c0, c1 }) => Fp6.isValid(c0) && Fp6.isValid(c1), + is0: ({ c0, c1 }) => Fp6.is0(c0) && Fp6.is0(c1), + neg: ({ c0, c1 }) => ({ c0: Fp6.neg(c0), c1: Fp6.neg(c1) }), + eql: ({ c0, c1 }, { c0: r0, c1: r1 }) => Fp6.eql(c0, r0) && Fp6.eql(c1, r1), + sqrt: notImplemented, + inv: ({ c0, c1 }) => { + let t = Fp6.inv(Fp6.sub(Fp6.sqr(c0), Fp6.mulByNonresidue(Fp6.sqr(c1)))) // 1 / (c0² - c1² * v) + return { c0: Fp6.mul(c0, t), c1: Fp6.neg(Fp6.mul(c1, t)) } // ((C0 * T) * T) + 
(-C1 * T) * w + }, + div: (lhs, rhs) => + Fp12.mul(lhs, typeof rhs === 'bigint' ? Fp.inv(Fp.create(rhs)) : Fp12.inv(rhs)), + pow: (num, power) => mod.FpPow(Fp12, num, power), + invertBatch: (nums) => mod.FpInvertBatch(Fp12, nums), + // Normalized + add: Fp12Add, + sub: Fp12Subtract, + mul: Fp12Multiply, + sqr: Fp12Square, + // NonNormalized stuff + addN: Fp12Add, + subN: Fp12Subtract, + mulN: Fp12Multiply, + sqrN: Fp12Square, + + // Bytes utils + fromBytes: (b: Uint8Array): Fp12 => { + if (b.length !== Fp12.BYTES) throw new Error('fromBytes invalid length=' + b.length) + return { + c0: Fp6.fromBytes(b.subarray(0, Fp6.BYTES)), + c1: Fp6.fromBytes(b.subarray(Fp6.BYTES)), + } + }, + toBytes: ({ c0, c1 }): Uint8Array => concatBytes(Fp6.toBytes(c0), Fp6.toBytes(c1)), + cmov: ({ c0, c1 }, { c0: r0, c1: r1 }, c) => ({ + c0: Fp6.cmov(c0, r0, c), + c1: Fp6.cmov(c1, r1, c), + }), + // Utils + // toString() { + // return '' + 'Fp12(' + this.c0 + this.c1 + '* w'); + // }, + // fromTuple(c: [Fp6, Fp6]) { + // return new Fp12(...c); + // } + fromBigTwelve: (t: BigintTwelve): Fp12 => ({ + c0: Fp6.fromBigSix(t.slice(0, 6) as BigintSix), + c1: Fp6.fromBigSix(t.slice(6, 12) as BigintSix), + }), + // Raises to q**i -th power + frobeniusMap(lhs, power: number) { + const { c0, c1, c2 } = Fp6.frobeniusMap(lhs.c1, power) + const coeff = FP12_FROBENIUS_COEFFICIENTS[power % 12] + return { + c0: Fp6.frobeniusMap(lhs.c0, power), + c1: Fp6.create({ + c0: Fp2.mul(c0, coeff), + c1: Fp2.mul(c1, coeff), + c2: Fp2.mul(c2, coeff), + }), + } + }, + mulByFp2: ({ c0, c1 }, rhs: Fp2): Fp12 => ({ + c0: Fp6.mulByFp2(c0, rhs), + c1: Fp6.mulByFp2(c1, rhs), + }), + conjugate: ({ c0, c1 }): Fp12 => ({ c0, c1: Fp6.neg(c1) }), + // Sparse multiplication + mul014: ({ c0, c1 }, o0: Fp2, o1: Fp2, o4: Fp2) => { + let t0 = Fp6.mul01(c0, o0, o1) + let t1 = Fp6.mul1(c1, o4) + return { + c0: Fp6.add(Fp6.mulByNonresidue(t1), t0), // T1 * v + T0 + // (c1 + c0) * [o0, o1+o4] - T0 - T1 + c1: Fp6.sub(Fp6.sub(Fp6.mul01(Fp6.add(c1, c0), o0, Fp2.add(o1, o4)), t0), t1), + } + }, + mul034: ({ c0, c1 }, o0: Fp2, o3: Fp2, o4: Fp2) => { + const a = Fp6.create({ + c0: Fp2.mul(c0.c0, o0), + c1: Fp2.mul(c0.c1, o0), + c2: Fp2.mul(c0.c2, o0), + }) + const b = Fp6.mul01(c1, o3, o4) + const e = Fp6.mul01(Fp6.add(c0, c1), Fp2.add(o0, o3), o4) + return { + c0: Fp6.add(Fp6.mulByNonresidue(b), a), + c1: Fp6.sub(e, Fp6.add(a, b)), + } + }, + + // A cyclotomic group is a subgroup of Fp^n defined by + // GΦₙ(p) = {α ∈ Fpⁿ : α^Φₙ(p) = 1} + // The result of any pairing is in a cyclotomic subgroup + // https://eprint.iacr.org/2009/565.pdf + _cyclotomicSquare: opts.Fp12cyclotomicSquare, + _cyclotomicExp: opts.Fp12cyclotomicExp, + // https://eprint.iacr.org/2010/354.pdf + // https://eprint.iacr.org/2009/565.pdf + finalExponentiate: opts.Fp12finalExponentiate, + } + + return { Fp, Fp2, Fp6, Fp4Square, Fp12 } +} diff --git a/packages/evm/src/precompiles/bls12_381/nbl/src/abstract/utils.ts b/packages/evm/src/precompiles/bls12_381/nbl/src/abstract/utils.ts new file mode 100644 index 00000000000..de81eaef58b --- /dev/null +++ b/packages/evm/src/precompiles/bls12_381/nbl/src/abstract/utils.ts @@ -0,0 +1,378 @@ +/** + * Hex, bytes and number utilities. + * @module + */ +/*! noble-curves - MIT License (c) 2022 Paul Miller (paulmillr.com) */ + +// 100 lines of code in the file are duplicated from noble-hashes (utils). +// This is OK: `abstract` directory does not use noble-hashes. +// User may opt-in into using different hashing library. 
This way, noble-hashes +// won't be included into their bundle. +const _0n = /* @__PURE__ */ BigInt(0) +const _1n = /* @__PURE__ */ BigInt(1) +export type Hex = Uint8Array | string // hex strings are accepted for simplicity +export type PrivKey = Hex | bigint // bigints are accepted to ease learning curve +export type CHash = { + (message: Uint8Array | string): Uint8Array + blockLen: number + outputLen: number + create(opts?: { dkLen?: number }): any // For shake +} +export type FHash = (message: Uint8Array | string) => Uint8Array + +export function isBytes(a: unknown): a is Uint8Array { + return a instanceof Uint8Array || (ArrayBuffer.isView(a) && a.constructor.name === 'Uint8Array') +} + +export function abytes(item: unknown): void { + if (!isBytes(item)) throw new Error('Uint8Array expected') +} + +export function abool(title: string, value: boolean): void { + if (typeof value !== 'boolean') throw new Error(title + ' boolean expected, got ' + value) +} + +export function numberToHexUnpadded(num: number | bigint): string { + const hex = num.toString(16) + return hex.length & 1 ? '0' + hex : hex +} + +export function hexToNumber(hex: string): bigint { + if (typeof hex !== 'string') throw new Error('hex string expected, got ' + typeof hex) + return hex === '' ? _0n : BigInt('0x' + hex) // Big Endian +} + +// Built-in hex conversion https://caniuse.com/mdn-javascript_builtins_uint8array_fromhex +const hasHexBuiltin: boolean = + // @ts-ignore + typeof Uint8Array.from([]).toHex === 'function' && typeof Uint8Array.fromHex === 'function' + +// Array where index 0xf0 (240) is mapped to string 'f0' +const hexes = /* @__PURE__ */ Array.from({ length: 256 }, (_, i) => i.toString(16).padStart(2, '0')) + +/** + * Convert byte array to hex string. Uses built-in function, when available. + * @example bytesToHex(Uint8Array.from([0xca, 0xfe, 0x01, 0x23])) // 'cafe0123' + */ +export function bytesToHex(bytes: Uint8Array): string { + abytes(bytes) + // @ts-ignore + if (hasHexBuiltin) return bytes.toHex() + // pre-caching improves the speed 6x + let hex = '' + for (let i = 0; i < bytes.length; i++) { + hex += hexes[bytes[i]] + } + return hex +} + +// We use optimized technique to convert hex string to byte array +const asciis = { _0: 48, _9: 57, A: 65, F: 70, a: 97, f: 102 } as const +function asciiToBase16(ch: number): number | undefined { + if (ch >= asciis._0 && ch <= asciis._9) return ch - asciis._0 // '2' => 50-48 + if (ch >= asciis.A && ch <= asciis.F) return ch - (asciis.A - 10) // 'B' => 66-(65-10) + if (ch >= asciis.a && ch <= asciis.f) return ch - (asciis.a - 10) // 'b' => 98-(97-10) + return +} + +/** + * Convert hex string to byte array. Uses built-in function, when available. 
+ * @example hexToBytes('cafe0123') // Uint8Array.from([0xca, 0xfe, 0x01, 0x23]) + */ +export function hexToBytes(hex: string): Uint8Array { + if (typeof hex !== 'string') throw new Error('hex string expected, got ' + typeof hex) + // @ts-ignore + if (hasHexBuiltin) return Uint8Array.fromHex(hex) + const hl = hex.length + const al = hl / 2 + if (hl % 2) throw new Error('hex string expected, got unpadded hex of length ' + hl) + const array = new Uint8Array(al) + for (let ai = 0, hi = 0; ai < al; ai++, hi += 2) { + const n1 = asciiToBase16(hex.charCodeAt(hi)) + const n2 = asciiToBase16(hex.charCodeAt(hi + 1)) + if (n1 === undefined || n2 === undefined) { + const char = hex[hi] + hex[hi + 1] + throw new Error('hex string expected, got non-hex character "' + char + '" at index ' + hi) + } + array[ai] = n1 * 16 + n2 // multiply first octet, e.g. 'a3' => 10*16+3 => 160 + 3 => 163 + } + return array +} + +// BE: Big Endian, LE: Little Endian +export function bytesToNumberBE(bytes: Uint8Array): bigint { + return hexToNumber(bytesToHex(bytes)) +} +export function bytesToNumberLE(bytes: Uint8Array): bigint { + abytes(bytes) + return hexToNumber(bytesToHex(Uint8Array.from(bytes).reverse())) +} + +export function numberToBytesBE(n: number | bigint, len: number): Uint8Array { + return hexToBytes(n.toString(16).padStart(len * 2, '0')) +} +export function numberToBytesLE(n: number | bigint, len: number): Uint8Array { + return numberToBytesBE(n, len).reverse() +} +// Unpadded, rarely used +export function numberToVarBytesBE(n: number | bigint): Uint8Array { + return hexToBytes(numberToHexUnpadded(n)) +} + +/** + * Takes hex string or Uint8Array, converts to Uint8Array. + * Validates output length. + * Will throw error for other types. + * @param title descriptive title for an error e.g. 'private key' + * @param hex hex string or Uint8Array + * @param expectedLength optional, will compare to result array's length + * @returns + */ +export function ensureBytes(title: string, hex: Hex, expectedLength?: number): Uint8Array { + let res: Uint8Array + if (typeof hex === 'string') { + try { + res = hexToBytes(hex) + } catch (e) { + throw new Error(title + ' must be hex string or Uint8Array, cause: ' + e) + } + } else if (isBytes(hex)) { + // Uint8Array.from() instead of hash.slice() because node.js Buffer + // is instance of Uint8Array, and its slice() creates **mutable** copy + res = Uint8Array.from(hex) + } else { + throw new Error(title + ' must be hex string or Uint8Array') + } + const len = res.length + if (typeof expectedLength === 'number' && len !== expectedLength) + throw new Error(title + ' of length ' + expectedLength + ' expected, got ' + len) + return res +} + +/** + * Copies several Uint8Arrays into one. 
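 * @example concatBytes(Uint8Array.from([0xde]), Uint8Array.from([0xad, 0xbe])) // Uint8Array [0xde, 0xad, 0xbe]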
+ */ +export function concatBytes(...arrays: Uint8Array[]): Uint8Array { + let sum = 0 + for (let i = 0; i < arrays.length; i++) { + const a = arrays[i] + abytes(a) + sum += a.length + } + const res = new Uint8Array(sum) + for (let i = 0, pad = 0; i < arrays.length; i++) { + const a = arrays[i] + res.set(a, pad) + pad += a.length + } + return res +} + +// Compares 2 u8a-s in kinda constant time +export function equalBytes(a: Uint8Array, b: Uint8Array): boolean { + if (a.length !== b.length) return false + let diff = 0 + for (let i = 0; i < a.length; i++) diff |= a[i] ^ b[i] + return diff === 0 +} + +// Global symbols in both browsers and Node.js since v11 +// See https://github.com/microsoft/TypeScript/issues/31535 +declare const TextEncoder: any + +/** + * @example utf8ToBytes('abc') // new Uint8Array([97, 98, 99]) + */ +export function utf8ToBytes(str: string): Uint8Array { + if (typeof str !== 'string') throw new Error('string expected') + return new Uint8Array(new TextEncoder().encode(str)) // https://bugzil.la/1681809 +} + +// Is positive bigint +const isPosBig = (n: bigint) => typeof n === 'bigint' && _0n <= n + +export function inRange(n: bigint, min: bigint, max: bigint): boolean { + return isPosBig(n) && isPosBig(min) && isPosBig(max) && min <= n && n < max +} + +/** + * Asserts min <= n < max. NOTE: It's < max and not <= max. + * @example + * aInRange('x', x, 1n, 256n); // would assume x is in (1n..255n) + */ +export function aInRange(title: string, n: bigint, min: bigint, max: bigint): void { + // Why min <= n < max and not a (min < n < max) OR b (min <= n <= max)? + // consider P=256n, min=0n, max=P + // - a for min=0 would require -1: `inRange('x', x, -1n, P)` + // - b would commonly require subtraction: `inRange('x', x, 0n, P - 1n)` + // - our way is the cleanest: `inRange('x', x, 0n, P) + if (!inRange(n, min, max)) + throw new Error('expected valid ' + title + ': ' + min + ' <= n < ' + max + ', got ' + n) +} + +// Bit operations + +/** + * Calculates amount of bits in a bigint. + * Same as `n.toString(2).length` + */ +export function bitLen(n: bigint): number { + let len + for (len = 0; n > _0n; n >>= _1n, len += 1); + return len +} + +/** + * Gets single bit at position. + * NOTE: first bit position is 0 (same as arrays) + * Same as `!!+Array.from(n.toString(2)).reverse()[pos]` + */ +export function bitGet(n: bigint, pos: number): bigint { + return (n >> BigInt(pos)) & _1n +} + +/** + * Sets single bit at position. + */ +export function bitSet(n: bigint, pos: number, value: boolean): bigint { + return n | ((value ? _1n : _0n) << BigInt(pos)) +} + +/** + * Calculate mask for N bits. Not using ** operator with bigints because of old engines. + * Same as BigInt(`0b${Array(i).fill('1').join('')}`) + */ +export const bitMask = (n: number): bigint => (_1n << BigInt(n)) - _1n + +// DRBG + +const u8n = (len: number) => new Uint8Array(len) // creates Uint8Array +const u8fr = (arr: ArrayLike) => Uint8Array.from(arr) // another shortcut +type Pred = (v: Uint8Array) => T | undefined +/** + * Minimal HMAC-DRBG from NIST 800-90 for RFC6979 sigs. 
+ * @returns function that will call DRBG until 2nd arg returns something meaningful + * @example + * const drbg = createHmacDRBG(32, 32, hmac); + * drbg(seed, bytesToKey); // bytesToKey must return Key or undefined + */ +export function createHmacDrbg( + hashLen: number, + qByteLen: number, + hmacFn: (key: Uint8Array, ...messages: Uint8Array[]) => Uint8Array, +): (seed: Uint8Array, predicate: Pred) => T { + if (typeof hashLen !== 'number' || hashLen < 2) throw new Error('hashLen must be a number') + if (typeof qByteLen !== 'number' || qByteLen < 2) throw new Error('qByteLen must be a number') + if (typeof hmacFn !== 'function') throw new Error('hmacFn must be a function') + // Step B, Step C: set hashLen to 8*ceil(hlen/8) + let v = u8n(hashLen) // Minimal non-full-spec HMAC-DRBG from NIST 800-90 for RFC6979 sigs. + let k = u8n(hashLen) // Steps B and C of RFC6979 3.2: set hashLen, in our case always same + let i = 0 // Iterations counter, will throw when over 1000 + const reset = () => { + v.fill(1) + k.fill(0) + i = 0 + } + const h = (...b: Uint8Array[]) => hmacFn(k, v, ...b) // hmac(k)(v, ...values) + const reseed = (seed = u8n(0)) => { + // HMAC-DRBG reseed() function. Steps D-G + k = h(u8fr([0x00]), seed) // k = hmac(k || v || 0x00 || seed) + v = h() // v = hmac(k || v) + if (seed.length === 0) return + k = h(u8fr([0x01]), seed) // k = hmac(k || v || 0x01 || seed) + v = h() // v = hmac(k || v) + } + const gen = () => { + // HMAC-DRBG generate() function + if (i++ >= 1000) throw new Error('drbg: tried 1000 values') + let len = 0 + const out: Uint8Array[] = [] + while (len < qByteLen) { + v = h() + const sl = v.slice() + out.push(sl) + len += v.length + } + return concatBytes(...out) + } + const genUntil = (seed: Uint8Array, pred: Pred): T => { + reset() + reseed(seed) // Steps D-G + let res: T | undefined = undefined // Step H: grind until k is in [1..n-1] + while (!(res = pred(gen()))) reseed() + reset() + return res + } + return genUntil +} + +// Validating curves and fields + +const validatorFns = { + bigint: (val: any): boolean => typeof val === 'bigint', + function: (val: any): boolean => typeof val === 'function', + boolean: (val: any): boolean => typeof val === 'boolean', + string: (val: any): boolean => typeof val === 'string', + stringOrUint8Array: (val: any): boolean => typeof val === 'string' || isBytes(val), + isSafeInteger: (val: any): boolean => Number.isSafeInteger(val), + array: (val: any): boolean => Array.isArray(val), + field: (val: any, object: any): any => (object as any).Fp.isValid(val), + hash: (val: any): boolean => typeof val === 'function' && Number.isSafeInteger(val.outputLen), +} as const +type Validator = keyof typeof validatorFns +type ValMap> = { [K in keyof T]?: Validator } +// type Record = { [P in K]: T; } + +export function validateObject>( + object: T, + validators: ValMap, + optValidators: ValMap = {}, +): T { + const checkField = (fieldName: keyof T, type: Validator, isOptional: boolean) => { + const checkVal = validatorFns[type] + if (typeof checkVal !== 'function') throw new Error('invalid validator function') + + const val = object[fieldName as keyof typeof object] + if (isOptional && val === undefined) return + if (!checkVal(val, object)) { + throw new Error( + 'param ' + String(fieldName) + ' is invalid. 
Expected ' + type + ', got ' + val, + ) + } + } + for (const [fieldName, type] of Object.entries(validators)) checkField(fieldName, type!, false) + for (const [fieldName, type] of Object.entries(optValidators)) checkField(fieldName, type!, true) + return object +} +// validate type tests +// const o: { a: number; b: number; c: number } = { a: 1, b: 5, c: 6 }; +// const z0 = validateObject(o, { a: 'isSafeInteger' }, { c: 'bigint' }); // Ok! +// // Should fail type-check +// const z1 = validateObject(o, { a: 'tmp' }, { c: 'zz' }); +// const z2 = validateObject(o, { a: 'isSafeInteger' }, { c: 'zz' }); +// const z3 = validateObject(o, { test: 'boolean', z: 'bug' }); +// const z4 = validateObject(o, { a: 'boolean', z: 'bug' }); + +/** + * throws not implemented error + */ +export const notImplemented = (): never => { + throw new Error('not implemented') +} + +/** + * Memoizes (caches) computation result. + * Uses WeakMap: the value is going auto-cleaned by GC after last reference is removed. + */ +export function memoized( + fn: (arg: T, ...args: O) => R, +): (arg: T, ...args: O) => R { + const map = new WeakMap() + return (arg: T, ...args: O): R => { + const val = map.get(arg) + if (val !== undefined) return val + const computed = fn(arg, ...args) + map.set(arg, computed) + return computed + } +} diff --git a/packages/evm/src/precompiles/bls12_381/nbl/src/abstract/weierstrass.ts b/packages/evm/src/precompiles/bls12_381/nbl/src/abstract/weierstrass.ts new file mode 100644 index 00000000000..b713756adbe --- /dev/null +++ b/packages/evm/src/precompiles/bls12_381/nbl/src/abstract/weierstrass.ts @@ -0,0 +1,1419 @@ +/** + * Short Weierstrass curve methods. The formula is: y² = x³ + ax + b. + * + * ### Parameters + * + * To initialize a weierstrass curve, one needs to pass following params: + * + * * a: formula param + * * b: formula param + * * Fp: finite Field over which we'll do calculations. Can be complex (Fp2, Fp12) + * * n: Curve prime subgroup order, total count of valid points in the field + * * Gx: Base point (x, y) aka generator point x coordinate + * * Gy: ...y coordinate + * * h: cofactor, usually 1. h*n = curve group order (n is only subgroup order) + * * lowS: whether to enable (default) or disable "low-s" non-malleable signatures + * + * ### Design rationale for types + * + * * Interaction between classes from different curves should fail: + * `k256.Point.BASE.add(p256.Point.BASE)` + * * For this purpose we want to use `instanceof` operator, which is fast and works during runtime + * * Different calls of `curve()` would return different classes - + * `curve(params) !== curve(params)`: if somebody decided to monkey-patch their curve, + * it won't affect others + * + * TypeScript can't infer types for classes created inside a function. Classes is one instance + * of nominative types in TypeScript and interfaces only check for shape, so it's hard to create + * unique type for every function call. + * + * We can use generic types via some param, like curve opts, but that would: + * 1. Enable interaction between `curve(params)` and `curve(params)` (curves of same params) + * which is hard to debug. + * 2. Params can be generic and we can't enforce them to be constant value: + * if somebody creates curve from non-constant params, + * it would be allowed to interact with other curves with non-constant params + * + * @todo https://www.typescriptlang.org/docs/handbook/release-notes/typescript-2-7.html#unique-symbol + * @module + */ +/*! 
noble-curves - MIT License (c) 2022 Paul Miller (paulmillr.com) */ +// prettier-ignore +import { + type AffinePoint, + type BasicCurve, + type Group, + type GroupConstructor, + pippenger, + validateBasic, + wNAF, +} from './curve.ts' +// prettier-ignore +import { + Field, + type IField, + getMinHashLength, + invert, + mapHashToField, + mod, + validateField, +} from './modular.ts' +// prettier-ignore +import { + type CHash, + type Hex, + type PrivKey, + aInRange, + abool, + bitMask, + bytesToHex, + bytesToNumberBE, + concatBytes, + createHmacDrbg, + ensureBytes, + hexToBytes, + inRange, + isBytes, + memoized, + numberToBytesBE, + numberToHexUnpadded, + validateObject, +} from './utils.ts' + +export type { AffinePoint } +type HmacFnSync = (key: Uint8Array, ...messages: Uint8Array[]) => Uint8Array +type EndomorphismOpts = { + beta: bigint + splitScalar: (k: bigint) => { k1neg: boolean; k1: bigint; k2neg: boolean; k2: bigint } +} +export type BasicWCurve = BasicCurve & { + // Params: a, b + a: T + b: T + + // Optional params + allowedPrivateKeyLengths?: readonly number[] // for P521 + wrapPrivateKey?: boolean // bls12-381 requires mod(n) instead of rejecting keys >= n + endo?: EndomorphismOpts // Endomorphism options for Koblitz curves + // When a cofactor != 1, there can be an effective methods to: + // 1. Determine whether a point is torsion-free + isTorsionFree?: (c: ProjConstructor, point: ProjPointType) => boolean + // 2. Clear torsion component + clearCofactor?: (c: ProjConstructor, point: ProjPointType) => ProjPointType +} + +export type Entropy = Hex | boolean +export type SignOpts = { lowS?: boolean; extraEntropy?: Entropy; prehash?: boolean } +export type VerOpts = { lowS?: boolean; prehash?: boolean; format?: 'compact' | 'der' | undefined } + +function validateSigVerOpts(opts: SignOpts | VerOpts) { + if (opts.lowS !== undefined) abool('lowS', opts.lowS) + if (opts.prehash !== undefined) abool('prehash', opts.prehash) +} + +// Instance for 3d XYZ points +export interface ProjPointType extends Group> { + readonly px: T + readonly py: T + readonly pz: T + get x(): T + get y(): T + toAffine(iz?: T): AffinePoint + toHex(isCompressed?: boolean): string + toRawBytes(isCompressed?: boolean): Uint8Array + + assertValidity(): void + hasEvenY(): boolean + multiplyUnsafe(scalar: bigint): ProjPointType + multiplyAndAddUnsafe(Q: ProjPointType, a: bigint, b: bigint): ProjPointType | undefined + isTorsionFree(): boolean + clearCofactor(): ProjPointType + _setWindowSize(windowSize: number): void +} +// Static methods for 3d XYZ points +export interface ProjConstructor extends GroupConstructor> { + new (x: T, y: T, z: T): ProjPointType + fromAffine(p: AffinePoint): ProjPointType + fromHex(hex: Hex): ProjPointType + fromPrivateKey(privateKey: PrivKey): ProjPointType + normalizeZ(points: ProjPointType[]): ProjPointType[] + msm(points: ProjPointType[], scalars: bigint[]): ProjPointType +} + +export type CurvePointsType = BasicWCurve & { + // Bytes + fromBytes?: (bytes: Uint8Array) => AffinePoint + toBytes?: (c: ProjConstructor, point: ProjPointType, isCompressed: boolean) => Uint8Array +} + +export type CurvePointsTypeWithLength = Readonly< + CurvePointsType & { nByteLength: number; nBitLength: number } +> + +function validatePointOpts(curve: CurvePointsType): CurvePointsTypeWithLength { + const opts = validateBasic(curve) + validateObject( + opts, + { + a: 'field', + b: 'field', + }, + { + allowedPrivateKeyLengths: 'array', + wrapPrivateKey: 'boolean', + isTorsionFree: 'function', + clearCofactor: 
'function', + allowInfinityPoint: 'boolean', + fromBytes: 'function', + toBytes: 'function', + }, + ) + const { endo, Fp, a } = opts + if (endo) { + if (!Fp.eql(a, Fp.ZERO)) { + throw new Error('invalid endomorphism, can only be defined for Koblitz curves that have a=0') + } + if ( + typeof endo !== 'object' || + typeof endo.beta !== 'bigint' || + typeof endo.splitScalar !== 'function' + ) { + throw new Error('invalid endomorphism, expected beta: bigint and splitScalar: function') + } + } + return Object.freeze({ ...opts } as const) +} + +export type CurvePointsRes = { + CURVE: ReturnType> + ProjectivePoint: ProjConstructor + normPrivateKeyToScalar: (key: PrivKey) => bigint + weierstrassEquation: (x: T) => T + isWithinCurveOrder: (num: bigint) => boolean +} + +export class DERErr extends Error { + constructor(m = '') { + super(m) + } +} +export type IDER = { + // asn.1 DER encoding utils + Err: typeof DERErr + // Basic building block is TLV (Tag-Length-Value) + _tlv: { + encode: (tag: number, data: string) => string + // v - value, l - left bytes (unparsed) + decode(tag: number, data: Uint8Array): { v: Uint8Array; l: Uint8Array } + } + // https://crypto.stackexchange.com/a/57734 Leftmost bit of first byte is 'negative' flag, + // since we always use positive integers here. It must always be empty: + // - add zero byte if exists + // - if next byte doesn't have a flag, leading zero is not allowed (minimal encoding) + _int: { + encode(num: bigint): string + decode(data: Uint8Array): bigint + } + toSig(hex: string | Uint8Array): { r: bigint; s: bigint } + hexFromSig(sig: { r: bigint; s: bigint }): string +} +/** + * ASN.1 DER encoding utilities. ASN is very complex & fragile. Format: + * + * [0x30 (SEQUENCE), bytelength, 0x02 (INTEGER), intLength, R, 0x02 (INTEGER), intLength, S] + * + * Docs: https://letsencrypt.org/docs/a-warm-welcome-to-asn1-and-der/, https://luca.ntop.org/Teaching/Appunti/asn1.html + */ +export const DER: IDER = { + // asn.1 DER encoding utils + Err: DERErr, + // Basic building block is TLV (Tag-Length-Value) + _tlv: { + encode: (tag: number, data: string): string => { + const { Err: E } = DER + if (tag < 0 || tag > 256) throw new E('tlv.encode: wrong tag') + if (data.length & 1) throw new E('tlv.encode: unpadded data') + const dataLen = data.length / 2 + const len = numberToHexUnpadded(dataLen) + if ((len.length / 2) & 0b1000_0000) throw new E('tlv.encode: long form length too big') + // length of length with long form flag + const lenLen = dataLen > 127 ? 
numberToHexUnpadded((len.length / 2) | 0b1000_0000) : '' + const t = numberToHexUnpadded(tag) + return t + lenLen + len + data + }, + // v - value, l - left bytes (unparsed) + decode(tag: number, data: Uint8Array): { v: Uint8Array; l: Uint8Array } { + const { Err: E } = DER + let pos = 0 + if (tag < 0 || tag > 256) throw new E('tlv.encode: wrong tag') + if (data.length < 2 || data[pos++] !== tag) throw new E('tlv.decode: wrong tlv') + const first = data[pos++] + const isLong = !!(first & 0b1000_0000) // First bit of first length byte is flag for short/long form + let length = 0 + if (!isLong) length = first + else { + // Long form: [longFlag(1bit), lengthLength(7bit), length (BE)] + const lenLen = first & 0b0111_1111 + if (!lenLen) throw new E('tlv.decode(long): indefinite length not supported') + if (lenLen > 4) throw new E('tlv.decode(long): byte length is too big') // this will overflow u32 in js + const lengthBytes = data.subarray(pos, pos + lenLen) + if (lengthBytes.length !== lenLen) throw new E('tlv.decode: length bytes not complete') + if (lengthBytes[0] === 0) throw new E('tlv.decode(long): zero leftmost byte') + for (const b of lengthBytes) length = (length << 8) | b + pos += lenLen + if (length < 128) throw new E('tlv.decode(long): not minimal encoding') + } + const v = data.subarray(pos, pos + length) + if (v.length !== length) throw new E('tlv.decode: wrong value length') + return { v, l: data.subarray(pos + length) } + }, + }, + // https://crypto.stackexchange.com/a/57734 Leftmost bit of first byte is 'negative' flag, + // since we always use positive integers here. It must always be empty: + // - add zero byte if exists + // - if next byte doesn't have a flag, leading zero is not allowed (minimal encoding) + _int: { + encode(num: bigint): string { + const { Err: E } = DER + if (num < _0n) throw new E('integer: negative integers are not allowed') + let hex = numberToHexUnpadded(num) + // Pad with zero byte if negative flag is present + if (Number.parseInt(hex[0], 16) & 0b1000) hex = '00' + hex + if (hex.length & 1) throw new E('unexpected DER parsing assertion: unpadded hex') + return hex + }, + decode(data: Uint8Array): bigint { + const { Err: E } = DER + if (data[0] & 0b1000_0000) throw new E('invalid signature integer: negative') + if (data[0] === 0x00 && !(data[1] & 0b1000_0000)) + throw new E('invalid signature integer: unnecessary leading zero') + return bytesToNumberBE(data) + }, + }, + toSig(hex: string | Uint8Array): { r: bigint; s: bigint } { + // parse DER signature + const { Err: E, _int: int, _tlv: tlv } = DER + const data = ensureBytes('signature', hex) + const { v: seqBytes, l: seqLeftBytes } = tlv.decode(0x30, data) + if (seqLeftBytes.length) throw new E('invalid signature: left bytes after parsing') + const { v: rBytes, l: rLeftBytes } = tlv.decode(0x02, seqBytes) + const { v: sBytes, l: sLeftBytes } = tlv.decode(0x02, rLeftBytes) + if (sLeftBytes.length) throw new E('invalid signature: left bytes after parsing') + return { r: int.decode(rBytes), s: int.decode(sBytes) } + }, + hexFromSig(sig: { r: bigint; s: bigint }): string { + const { _tlv: tlv, _int: int } = DER + const rs = tlv.encode(0x02, int.encode(sig.r)) + const ss = tlv.encode(0x02, int.encode(sig.s)) + const seq = rs + ss + return tlv.encode(0x30, seq) + }, +} + +// Be friendly to bad ECMAScript parsers by not using bigint literals +// prettier-ignore +const _0n = BigInt(0), + _1n = BigInt(1), + _2n = BigInt(2), + _3n = BigInt(3), + _4n = BigInt(4) + +export function weierstrassPoints(opts: 
CurvePointsType): CurvePointsRes { + const CURVE = validatePointOpts(opts) + const { Fp } = CURVE // All curves has same field / group length as for now, but they can differ + const Fn = Field(CURVE.n, CURVE.nBitLength) + + const toBytes = + CURVE.toBytes || + ((_c: ProjConstructor, point: ProjPointType, _isCompressed: boolean) => { + const a = point.toAffine() + return concatBytes(Uint8Array.from([0x04]), Fp.toBytes(a.x), Fp.toBytes(a.y)) + }) + const fromBytes = + CURVE.fromBytes || + ((bytes: Uint8Array) => { + // const head = bytes[0]; + const tail = bytes.subarray(1) + // if (head !== 0x04) throw new Error('Only non-compressed encoding is supported'); + const x = Fp.fromBytes(tail.subarray(0, Fp.BYTES)) + const y = Fp.fromBytes(tail.subarray(Fp.BYTES, 2 * Fp.BYTES)) + return { x, y } + }) + + /** + * y² = x³ + ax + b: Short weierstrass curve formula. Takes x, returns y². + * @returns y² + */ + function weierstrassEquation(x: T): T { + const { a, b } = CURVE + const x2 = Fp.sqr(x) // x * x + const x3 = Fp.mul(x2, x) // x2 * x + return Fp.add(Fp.add(x3, Fp.mul(x, a)), b) // x3 + a * x + b + } + // Validate whether the passed curve params are valid. + // We check if curve equation works for generator point. + // `assertValidity()` won't work: `isTorsionFree()` is not available at this point in bls12-381. + // ProjectivePoint class has not been initialized yet. + if (!Fp.eql(Fp.sqr(CURVE.Gy), weierstrassEquation(CURVE.Gx))) + throw new Error('bad generator point: equation left != right') + + // Valid group elements reside in range 1..n-1 + function isWithinCurveOrder(num: bigint): boolean { + return inRange(num, _1n, CURVE.n) + } + // Validates if priv key is valid and converts it to bigint. + // Supports options allowedPrivateKeyLengths and wrapPrivateKey. + function normPrivateKeyToScalar(key: PrivKey): bigint { + const { allowedPrivateKeyLengths: lengths, nByteLength, wrapPrivateKey, n: N } = CURVE + if (lengths && typeof key !== 'bigint') { + if (isBytes(key)) key = bytesToHex(key) + // Normalize to hex string, pad. E.g. P521 would norm 130-132 char hex to 132-char bytes + if (typeof key !== 'string' || !lengths.includes(key.length)) + throw new Error('invalid private key') + key = key.padStart(nByteLength * 2, '0') + } + let num: bigint + try { + num = + typeof key === 'bigint' + ? key + : bytesToNumberBE(ensureBytes('private key', key, nByteLength)) + } catch (error) { + throw new Error( + 'invalid private key, expected hex or ' + nByteLength + ' bytes, got ' + typeof key, + ) + } + if (wrapPrivateKey) num = mod(num, N) // disabled by default, enabled for BLS + aInRange('private key', num, _1n, N) // num in range [1..N-1] + return num + } + + function aprjpoint(other: unknown) { + if (!(other instanceof Point)) throw new Error('ProjectivePoint expected') + } + + // Memoized toAffine / validity check. They are heavy. Points are immutable. + + // Converts Projective point to affine (x, y) coordinates. + // Can accept precomputed Z^-1 - for example, from invertBatch. + // (x, y, z) ∋ (x=x/z, y=y/z) + const toAffineMemo = memoized((p: Point, iz?: T): AffinePoint => { + const { px: x, py: y, pz: z } = p + // Fast-path for normalized points + if (Fp.eql(z, Fp.ONE)) return { x, y } + const is0 = p.is0() + // If invZ was 0, we return zero point. However we still want to execute + // all operations, so we replace invZ with a random number, 1. + if (iz == null) iz = is0 ? 
Fp.ONE : Fp.inv(z) + const ax = Fp.mul(x, iz) + const ay = Fp.mul(y, iz) + const zz = Fp.mul(z, iz) + if (is0) return { x: Fp.ZERO, y: Fp.ZERO } + if (!Fp.eql(zz, Fp.ONE)) throw new Error('invZ was invalid') + return { x: ax, y: ay } + }) + // NOTE: on exception this will crash 'cached' and no value will be set. + // Otherwise true will be return + const assertValidMemo = memoized((p: Point) => { + if (p.is0()) { + // (0, 1, 0) aka ZERO is invalid in most contexts. + // In BLS, ZERO can be serialized, so we allow it. + // (0, 0, 0) is invalid representation of ZERO. + if (CURVE.allowInfinityPoint && !Fp.is0(p.py)) return + throw new Error('bad point: ZERO') + } + // Some 3rd-party test vectors require different wording between here & `fromCompressedHex` + const { x, y } = p.toAffine() + // Check if x, y are valid field elements + if (!Fp.isValid(x) || !Fp.isValid(y)) throw new Error('bad point: x or y not FE') + const left = Fp.sqr(y) // y² + const right = weierstrassEquation(x) // x³ + ax + b + if (!Fp.eql(left, right)) throw new Error('bad point: equation left != right') + if (!p.isTorsionFree()) throw new Error('bad point: not in prime-order subgroup') + return true + }) + + /** + * Projective Point works in 3d / projective (homogeneous) coordinates: (x, y, z) ∋ (x=x/z, y=y/z) + * Default Point works in 2d / affine coordinates: (x, y) + * We're doing calculations in projective, because its operations don't require costly inversion. + */ + class Point implements ProjPointType { + static readonly BASE = new Point(CURVE.Gx, CURVE.Gy, Fp.ONE) + static readonly ZERO = new Point(Fp.ZERO, Fp.ONE, Fp.ZERO) // 0, 1, 0 + readonly px: T + readonly py: T + readonly pz: T + + constructor(px: T, py: T, pz: T) { + if (px == null || !Fp.isValid(px)) throw new Error('x required') + if (py == null || !Fp.isValid(py) || Fp.is0(py)) throw new Error('y required') + if (pz == null || !Fp.isValid(pz)) throw new Error('z required') + this.px = px + this.py = py + this.pz = pz + Object.freeze(this) + } + + // Does not validate if the point is on-curve. + // Use fromHex instead, or call assertValidity() later. + static fromAffine(p: AffinePoint): Point { + const { x, y } = p || {} + if (!p || !Fp.isValid(x) || !Fp.isValid(y)) throw new Error('invalid affine point') + if (p instanceof Point) throw new Error('projective point not allowed') + const is0 = (i: T) => Fp.eql(i, Fp.ZERO) + // fromAffine(x:0, y:0) would produce (x:0, y:0, z:1), but we need (x:0, y:1, z:0) + if (is0(x) && is0(y)) return Point.ZERO + return new Point(x, y, Fp.ONE) + } + + get x(): T { + return this.toAffine().x + } + get y(): T { + return this.toAffine().y + } + + /** + * Takes a bunch of Projective Points but executes only one + * inversion on all of them. Inversion is very slow operation, + * so this improves performance massively. + * Optimization: converts a list of projective points to a list of identical points with Z=1. + */ + static normalizeZ(points: Point[]): Point[] { + const toInv = Fp.invertBatch(points.map((p) => p.pz)) + return points.map((p, i) => p.toAffine(toInv[i])).map(Point.fromAffine) + } + + /** + * Converts hash string or Uint8Array to Point. + * @param hex short/long ECDSA hex + */ + static fromHex(hex: Hex): Point { + const P = Point.fromAffine(fromBytes(ensureBytes('pointHex', hex))) + P.assertValidity() + return P + } + + // Multiplies generator point by privateKey. 
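  // Illustrative (hypothetical) usage: Point.fromPrivateKey(2n) yields the same point as Point.BASE.multiply(2n).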
+ static fromPrivateKey(privateKey: PrivKey) { + return Point.BASE.multiply(normPrivateKeyToScalar(privateKey)) + } + + // Multiscalar Multiplication + static msm(points: Point[], scalars: bigint[]): Point { + return pippenger(Point, Fn, points, scalars) + } + + // "Private method", don't use it directly + _setWindowSize(windowSize: number) { + wnaf.setWindowSize(this, windowSize) + } + + // A point on curve is valid if it conforms to equation. + assertValidity(): void { + assertValidMemo(this) + } + + hasEvenY(): boolean { + const { y } = this.toAffine() + if (Fp.isOdd) return !Fp.isOdd(y) + throw new Error("Field doesn't support isOdd") + } + + /** + * Compare one point to another. + */ + equals(other: Point): boolean { + aprjpoint(other) + const { px: X1, py: Y1, pz: Z1 } = this + const { px: X2, py: Y2, pz: Z2 } = other + const U1 = Fp.eql(Fp.mul(X1, Z2), Fp.mul(X2, Z1)) + const U2 = Fp.eql(Fp.mul(Y1, Z2), Fp.mul(Y2, Z1)) + return U1 && U2 + } + + /** + * Flips point to one corresponding to (x, -y) in Affine coordinates. + */ + negate(): Point { + return new Point(this.px, Fp.neg(this.py), this.pz) + } + + // Renes-Costello-Batina exception-free doubling formula. + // There is 30% faster Jacobian formula, but it is not complete. + // https://eprint.iacr.org/2015/1060, algorithm 3 + // Cost: 8M + 3S + 3*a + 2*b3 + 15add. + double() { + const { a, b } = CURVE + const b3 = Fp.mul(b, _3n) + const { px: X1, py: Y1, pz: Z1 } = this + let X3 = Fp.ZERO, + Y3 = Fp.ZERO, + Z3 = Fp.ZERO // prettier-ignore + let t0 = Fp.mul(X1, X1) // step 1 + let t1 = Fp.mul(Y1, Y1) + let t2 = Fp.mul(Z1, Z1) + let t3 = Fp.mul(X1, Y1) + t3 = Fp.add(t3, t3) // step 5 + Z3 = Fp.mul(X1, Z1) + Z3 = Fp.add(Z3, Z3) + X3 = Fp.mul(a, Z3) + Y3 = Fp.mul(b3, t2) + Y3 = Fp.add(X3, Y3) // step 10 + X3 = Fp.sub(t1, Y3) + Y3 = Fp.add(t1, Y3) + Y3 = Fp.mul(X3, Y3) + X3 = Fp.mul(t3, X3) + Z3 = Fp.mul(b3, Z3) // step 15 + t2 = Fp.mul(a, t2) + t3 = Fp.sub(t0, t2) + t3 = Fp.mul(a, t3) + t3 = Fp.add(t3, Z3) + Z3 = Fp.add(t0, t0) // step 20 + t0 = Fp.add(Z3, t0) + t0 = Fp.add(t0, t2) + t0 = Fp.mul(t0, t3) + Y3 = Fp.add(Y3, t0) + t2 = Fp.mul(Y1, Z1) // step 25 + t2 = Fp.add(t2, t2) + t0 = Fp.mul(t2, t3) + X3 = Fp.sub(X3, t0) + Z3 = Fp.mul(t2, t1) + Z3 = Fp.add(Z3, Z3) // step 30 + Z3 = Fp.add(Z3, Z3) + return new Point(X3, Y3, Z3) + } + + // Renes-Costello-Batina exception-free addition formula. + // There is 30% faster Jacobian formula, but it is not complete. + // https://eprint.iacr.org/2015/1060, algorithm 1 + // Cost: 12M + 0S + 3*a + 3*b3 + 23add. 
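  // "Exception-free" here means the one formula below also covers doubling and the identity point, so no special-case branching is needed (the trade-off versus the faster but incomplete Jacobian formula noted above).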
+ add(other: Point): Point { + aprjpoint(other) + const { px: X1, py: Y1, pz: Z1 } = this + const { px: X2, py: Y2, pz: Z2 } = other + let X3 = Fp.ZERO, + Y3 = Fp.ZERO, + Z3 = Fp.ZERO // prettier-ignore + const a = CURVE.a + const b3 = Fp.mul(CURVE.b, _3n) + let t0 = Fp.mul(X1, X2) // step 1 + let t1 = Fp.mul(Y1, Y2) + let t2 = Fp.mul(Z1, Z2) + let t3 = Fp.add(X1, Y1) + let t4 = Fp.add(X2, Y2) // step 5 + t3 = Fp.mul(t3, t4) + t4 = Fp.add(t0, t1) + t3 = Fp.sub(t3, t4) + t4 = Fp.add(X1, Z1) + let t5 = Fp.add(X2, Z2) // step 10 + t4 = Fp.mul(t4, t5) + t5 = Fp.add(t0, t2) + t4 = Fp.sub(t4, t5) + t5 = Fp.add(Y1, Z1) + X3 = Fp.add(Y2, Z2) // step 15 + t5 = Fp.mul(t5, X3) + X3 = Fp.add(t1, t2) + t5 = Fp.sub(t5, X3) + Z3 = Fp.mul(a, t4) + X3 = Fp.mul(b3, t2) // step 20 + Z3 = Fp.add(X3, Z3) + X3 = Fp.sub(t1, Z3) + Z3 = Fp.add(t1, Z3) + Y3 = Fp.mul(X3, Z3) + t1 = Fp.add(t0, t0) // step 25 + t1 = Fp.add(t1, t0) + t2 = Fp.mul(a, t2) + t4 = Fp.mul(b3, t4) + t1 = Fp.add(t1, t2) + t2 = Fp.sub(t0, t2) // step 30 + t2 = Fp.mul(a, t2) + t4 = Fp.add(t4, t2) + t0 = Fp.mul(t1, t4) + Y3 = Fp.add(Y3, t0) + t0 = Fp.mul(t5, t4) // step 35 + X3 = Fp.mul(t3, X3) + X3 = Fp.sub(X3, t0) + t0 = Fp.mul(t3, t1) + Z3 = Fp.mul(t5, Z3) + Z3 = Fp.add(Z3, t0) // step 40 + return new Point(X3, Y3, Z3) + } + + subtract(other: Point) { + return this.add(other.negate()) + } + + is0() { + return this.equals(Point.ZERO) + } + + private wNAF(n: bigint): { p: Point; f: Point } { + return wnaf.wNAFCached(this, n, Point.normalizeZ) + } + + /** + * Non-constant-time multiplication. Uses double-and-add algorithm. + * It's faster, but should only be used when you don't care about + * an exposed private key e.g. sig verification, which works over *public* keys. + */ + multiplyUnsafe(sc: bigint): Point { + const { endo, n: N } = CURVE + aInRange('scalar', sc, _0n, N) + const I = Point.ZERO + if (sc === _0n) return I + if (this.is0() || sc === _1n) return this + + // Case a: no endomorphism. Case b: has precomputes. + if (!endo || wnaf.hasPrecomputes(this)) + return wnaf.wNAFCachedUnsafe(this, sc, Point.normalizeZ) + + // Case c: endomorphism + let { k1neg, k1, k2neg, k2 } = endo.splitScalar(sc) + let k1p = I + let k2p = I + let d: Point = this + while (k1 > _0n || k2 > _0n) { + if (k1 & _1n) k1p = k1p.add(d) + if (k2 & _1n) k2p = k2p.add(d) + d = d.double() + k1 >>= _1n + k2 >>= _1n + } + if (k1neg) k1p = k1p.negate() + if (k2neg) k2p = k2p.negate() + k2p = new Point(Fp.mul(k2p.px, endo.beta), k2p.py, k2p.pz) + return k1p.add(k2p) + } + + /** + * Constant time multiplication. + * Uses wNAF method. Windowed method may be 10% faster, + * but takes 2x longer to generate and consumes 2x memory. + * Uses precomputes when available. + * Uses endomorphism for Koblitz curves. 
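   * @example Point.BASE.multiply(BigInt(3)) // constant-time 3*G (illustrative)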
+ * @param scalar by which the point would be multiplied + * @returns New point + */ + multiply(scalar: bigint): Point { + const { endo, n: N } = CURVE + aInRange('scalar', scalar, _1n, N) + let point: Point, fake: Point // Fake point is used to const-time mult + if (endo) { + const { k1neg, k1, k2neg, k2 } = endo.splitScalar(scalar) + let { p: k1p, f: f1p } = this.wNAF(k1) + let { p: k2p, f: f2p } = this.wNAF(k2) + k1p = wnaf.constTimeNegate(k1neg, k1p) + k2p = wnaf.constTimeNegate(k2neg, k2p) + k2p = new Point(Fp.mul(k2p.px, endo.beta), k2p.py, k2p.pz) + point = k1p.add(k2p) + fake = f1p.add(f2p) + } else { + const { p, f } = this.wNAF(scalar) + point = p + fake = f + } + // Normalize `z` for both points, but return only real one + return Point.normalizeZ([point, fake])[0] + } + + /** + * Efficiently calculate `aP + bQ`. Unsafe, can expose private key, if used incorrectly. + * Not using Strauss-Shamir trick: precomputation tables are faster. + * The trick could be useful if both P and Q are not G (not in our case). + * @returns non-zero affine point + */ + multiplyAndAddUnsafe(Q: Point, a: bigint, b: bigint): Point | undefined { + const G = Point.BASE // No Strauss-Shamir trick: we have 10% faster G precomputes + const mul = ( + P: Point, + a: bigint, // Select faster multiply() method + ) => (a === _0n || a === _1n || !P.equals(G) ? P.multiplyUnsafe(a) : P.multiply(a)) + const sum = mul(this, a).add(mul(Q, b)) + return sum.is0() ? undefined : sum + } + + // Converts Projective point to affine (x, y) coordinates. + // Can accept precomputed Z^-1 - for example, from invertBatch. + // (x, y, z) ∋ (x=x/z, y=y/z) + toAffine(iz?: T): AffinePoint { + return toAffineMemo(this, iz) + } + isTorsionFree(): boolean { + const { h: cofactor, isTorsionFree } = CURVE + if (cofactor === _1n) return true // No subgroups, always torsion-free + if (isTorsionFree) return isTorsionFree(Point, this) + throw new Error('isTorsionFree() has not been declared for the elliptic curve') + } + clearCofactor(): Point { + const { h: cofactor, clearCofactor } = CURVE + if (cofactor === _1n) return this // Fast-path + if (clearCofactor) return clearCofactor(Point, this) as Point + return this.multiplyUnsafe(CURVE.h) + } + + toRawBytes(isCompressed = true): Uint8Array { + abool('isCompressed', isCompressed) + this.assertValidity() + return toBytes(Point, this, isCompressed) + } + + toHex(isCompressed = true): string { + abool('isCompressed', isCompressed) + return bytesToHex(this.toRawBytes(isCompressed)) + } + } + const _bits = CURVE.nBitLength + const wnaf = wNAF(Point, CURVE.endo ? 
Math.ceil(_bits / 2) : _bits) + // Validate if generator point is on curve + return { + CURVE, + ProjectivePoint: Point as ProjConstructor, + normPrivateKeyToScalar, + weierstrassEquation, + isWithinCurveOrder, + } +} + +// Instance +export interface SignatureType { + readonly r: bigint + readonly s: bigint + readonly recovery?: number + assertValidity(): void + addRecoveryBit(recovery: number): RecoveredSignatureType + hasHighS(): boolean + normalizeS(): SignatureType + recoverPublicKey(msgHash: Hex): ProjPointType + toCompactRawBytes(): Uint8Array + toCompactHex(): string + toDERRawBytes(isCompressed?: boolean): Uint8Array + toDERHex(isCompressed?: boolean): string +} +export type RecoveredSignatureType = SignatureType & { + readonly recovery: number +} +// Static methods +export type SignatureConstructor = { + new (r: bigint, s: bigint): SignatureType + fromCompact(hex: Hex): SignatureType + fromDER(hex: Hex): SignatureType +} +type SignatureLike = { r: bigint; s: bigint } + +export type PubKey = Hex | ProjPointType + +export type CurveType = BasicWCurve & { + hash: CHash // CHash not FHash because we need outputLen for DRBG + hmac: HmacFnSync + randomBytes: (bytesLength?: number) => Uint8Array + lowS?: boolean + bits2int?: (bytes: Uint8Array) => bigint + bits2int_modN?: (bytes: Uint8Array) => bigint +} + +function validateOpts( + curve: CurveType, +): Readonly { + const opts = validateBasic(curve) + validateObject( + opts, + { + hash: 'hash', + hmac: 'function', + randomBytes: 'function', + }, + { + bits2int: 'function', + bits2int_modN: 'function', + lowS: 'boolean', + }, + ) + return Object.freeze({ lowS: true, ...opts } as const) +} + +export type CurveFn = { + CURVE: ReturnType + getPublicKey: (privateKey: PrivKey, isCompressed?: boolean) => Uint8Array + getSharedSecret: (privateA: PrivKey, publicB: Hex, isCompressed?: boolean) => Uint8Array + sign: (msgHash: Hex, privKey: PrivKey, opts?: SignOpts) => RecoveredSignatureType + verify: (signature: Hex | SignatureLike, msgHash: Hex, publicKey: Hex, opts?: VerOpts) => boolean + ProjectivePoint: ProjConstructor + Signature: SignatureConstructor + utils: { + normPrivateKeyToScalar: (key: PrivKey) => bigint + isValidPrivateKey(privateKey: PrivKey): boolean + randomPrivateKey: () => Uint8Array + precompute: (windowSize?: number, point?: ProjPointType) => ProjPointType + } +} + +/** + * Creates short weierstrass curve and ECDSA signature methods for it. + * @example + * import { Field } from '@noble/curves/abstract/modular'; + * // Before that, define BigInt-s: a, b, p, n, Gx, Gy + * const curve = weierstrass({ a, b, Fp: Field(p), n, Gx, Gy, h: 1n }) + */ +export function weierstrass(curveDef: CurveType): CurveFn { + const CURVE = validateOpts(curveDef) as ReturnType + const { Fp, n: CURVE_ORDER } = CURVE + const compressedLen = Fp.BYTES + 1 // e.g. 33 for 32 + const uncompressedLen = 2 * Fp.BYTES + 1 // e.g. 65 for 32 + + function modN(a: bigint) { + return mod(a, CURVE_ORDER) + } + function invN(a: bigint) { + return invert(a, CURVE_ORDER) + } + + const { + ProjectivePoint: Point, + normPrivateKeyToScalar, + weierstrassEquation, + isWithinCurveOrder, + } = weierstrassPoints({ + ...CURVE, + toBytes(_c, point, isCompressed: boolean): Uint8Array { + const a = point.toAffine() + const x = Fp.toBytes(a.x) + const cat = concatBytes + abool('isCompressed', isCompressed) + if (isCompressed) { + return cat(Uint8Array.from([point.hasEvenY() ? 
0x02 : 0x03]), x) + } else { + return cat(Uint8Array.from([0x04]), x, Fp.toBytes(a.y)) + } + }, + fromBytes(bytes: Uint8Array) { + const len = bytes.length + const head = bytes[0] + const tail = bytes.subarray(1) + // this.assertValidity() is done inside of fromHex + if (len === compressedLen && (head === 0x02 || head === 0x03)) { + const x = bytesToNumberBE(tail) + if (!inRange(x, _1n, Fp.ORDER)) throw new Error('Point is not on curve') + const y2 = weierstrassEquation(x) // y² = x³ + ax + b + let y: bigint + try { + y = Fp.sqrt(y2) // y = y² ^ (p+1)/4 + } catch (sqrtError) { + const suffix = sqrtError instanceof Error ? ': ' + sqrtError.message : '' + throw new Error('Point is not on curve' + suffix) + } + const isYOdd = (y & _1n) === _1n + // ECDSA + const isHeadOdd = (head & 1) === 1 + if (isHeadOdd !== isYOdd) y = Fp.neg(y) + return { x, y } + } else if (len === uncompressedLen && head === 0x04) { + const x = Fp.fromBytes(tail.subarray(0, Fp.BYTES)) + const y = Fp.fromBytes(tail.subarray(Fp.BYTES, 2 * Fp.BYTES)) + return { x, y } + } else { + const cl = compressedLen + const ul = uncompressedLen + throw new Error( + 'invalid Point, expected length of ' + cl + ', or uncompressed ' + ul + ', got ' + len, + ) + } + }, + }) + const numToNByteHex = (num: bigint): string => bytesToHex(numberToBytesBE(num, CURVE.nByteLength)) + + function isBiggerThanHalfOrder(number: bigint) { + const HALF = CURVE_ORDER >> _1n + return number > HALF + } + + function normalizeS(s: bigint) { + return isBiggerThanHalfOrder(s) ? modN(-s) : s + } + // slice bytes num + const slcNum = (b: Uint8Array, from: number, to: number) => bytesToNumberBE(b.slice(from, to)) + + /** + * ECDSA signature with its (r, s) properties. Supports DER & compact representations. + */ + class Signature implements SignatureType { + readonly r: bigint + readonly s: bigint + readonly recovery?: number + constructor(r: bigint, s: bigint, recovery?: number) { + aInRange('r', r, _1n, CURVE_ORDER) // r in [1..N] + aInRange('s', s, _1n, CURVE_ORDER) // s in [1..N] + this.r = r + this.s = s + if (recovery != null) this.recovery = recovery + Object.freeze(this) + } + + // pair (bytes of r, bytes of s) + static fromCompact(hex: Hex) { + const l = CURVE.nByteLength + hex = ensureBytes('compactSignature', hex, l * 2) + return new Signature(slcNum(hex, 0, l), slcNum(hex, l, 2 * l)) + } + + // DER encoded ECDSA signature + // https://bitcoin.stackexchange.com/questions/57644/what-are-the-parts-of-a-bitcoin-transaction-input-script + static fromDER(hex: Hex) { + const { r, s } = DER.toSig(ensureBytes('DER', hex)) + return new Signature(r, s) + } + + /** + * @todo remove + * @deprecated + */ + assertValidity(): void {} + + addRecoveryBit(recovery: number): RecoveredSignature { + return new Signature(this.r, this.s, recovery) as RecoveredSignature + } + + recoverPublicKey(msgHash: Hex): typeof Point.BASE { + const { r, s, recovery: rec } = this + const h = bits2int_modN(ensureBytes('msgHash', msgHash)) // Truncate hash + if (rec == null || ![0, 1, 2, 3].includes(rec)) throw new Error('recovery id invalid') + const radj = rec === 2 || rec === 3 ? r + CURVE.n : r + if (radj >= Fp.ORDER) throw new Error('recovery id 2 or 3 invalid') + const prefix = (rec & 1) === 0 ? 
'02' : '03' + const R = Point.fromHex(prefix + numToNByteHex(radj)) + const ir = invN(radj) // r^-1 + const u1 = modN(-h * ir) // -hr^-1 + const u2 = modN(s * ir) // sr^-1 + const Q = Point.BASE.multiplyAndAddUnsafe(R, u1, u2) // (sr^-1)R-(hr^-1)G = -(hr^-1)G + (sr^-1) + if (!Q) throw new Error('point at infinify') // unsafe is fine: no priv data leaked + Q.assertValidity() + return Q + } + + // Signatures should be low-s, to prevent malleability. + hasHighS(): boolean { + return isBiggerThanHalfOrder(this.s) + } + + normalizeS() { + return this.hasHighS() ? new Signature(this.r, modN(-this.s), this.recovery) : this + } + + // DER-encoded + toDERRawBytes() { + return hexToBytes(this.toDERHex()) + } + toDERHex() { + return DER.hexFromSig({ r: this.r, s: this.s }) + } + + // padded bytes of r, then padded bytes of s + toCompactRawBytes() { + return hexToBytes(this.toCompactHex()) + } + toCompactHex() { + return numToNByteHex(this.r) + numToNByteHex(this.s) + } + } + type RecoveredSignature = Signature & { recovery: number } + + const utils = { + isValidPrivateKey(privateKey: PrivKey) { + try { + normPrivateKeyToScalar(privateKey) + return true + } catch (error) { + return false + } + }, + normPrivateKeyToScalar: normPrivateKeyToScalar, + + /** + * Produces cryptographically secure private key from random of size + * (groupLen + ceil(groupLen / 2)) with modulo bias being negligible. + */ + randomPrivateKey: (): Uint8Array => { + const length = getMinHashLength(CURVE.n) + return mapHashToField(CURVE.randomBytes(length), CURVE.n) + }, + + /** + * Creates precompute table for an arbitrary EC point. Makes point "cached". + * Allows to massively speed-up `point.multiply(scalar)`. + * @returns cached point + * @example + * const fast = utils.precompute(8, ProjectivePoint.fromHex(someonesPubKey)); + * fast.multiply(privKey); // much faster ECDH now + */ + precompute(windowSize = 8, point = Point.BASE): typeof Point.BASE { + point._setWindowSize(windowSize) + point.multiply(BigInt(3)) // 3 is arbitrary, just need any number here + return point + }, + } + + /** + * Computes public key for a private key. Checks for validity of the private key. + * @param privateKey private key + * @param isCompressed whether to return compact (default), or full key + * @returns Public key, full when isCompressed=false; short when isCompressed=true + */ + function getPublicKey(privateKey: PrivKey, isCompressed = true): Uint8Array { + return Point.fromPrivateKey(privateKey).toRawBytes(isCompressed) + } + + /** + * Quick and dirty check for item being public key. Does not validate hex, or being on-curve. + */ + function isProbPub(item: PrivKey | PubKey): boolean { + const arr = isBytes(item) + const str = typeof item === 'string' + const len = (arr || str) && (item as Hex).length + if (arr) return len === compressedLen || len === uncompressedLen + if (str) return len === 2 * compressedLen || len === 2 * uncompressedLen + if (item instanceof Point) return true + return false + } + + /** + * ECDH (Elliptic Curve Diffie Hellman). + * Computes shared public key from private key and public key. + * Checks: 1) private key validity 2) shared key is on-curve. + * Does NOT hash the result. 
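+ * A minimal ECDH sketch (illustrative names; `curve` is any instance returned by weierstrass()):
+ * @example
+ * const alicePriv = curve.utils.randomPrivateKey()
+ * const bobPriv = curve.utils.randomPrivateKey()
+ * const bobPub = curve.getPublicKey(bobPriv)
+ * const shared = curve.getSharedSecret(alicePriv, bobPub) // compressed point: 1 + Fp.BYTES bytes
+ * // run `shared` through a hash / KDF before using it as a symmetric key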
+ * @param privateA private key + * @param publicB different public key + * @param isCompressed whether to return compact (default), or full key + * @returns shared public key + */ + function getSharedSecret(privateA: PrivKey, publicB: Hex, isCompressed = true): Uint8Array { + if (isProbPub(privateA)) throw new Error('first arg must be private key') + if (!isProbPub(publicB)) throw new Error('second arg must be public key') + const b = Point.fromHex(publicB) // check for being on-curve + return b.multiply(normPrivateKeyToScalar(privateA)).toRawBytes(isCompressed) + } + + // RFC6979: ensure ECDSA msg is X bytes and < N. RFC suggests optional truncating via bits2octets. + // FIPS 186-4 4.6 suggests the leftmost min(nBitLen, outLen) bits, which matches bits2int. + // bits2int can produce res>N, we can do mod(res, N) since the bitLen is the same. + // int2octets can't be used; pads small msgs with 0: unacceptatble for trunc as per RFC vectors + const bits2int = + CURVE.bits2int || + function (bytes: Uint8Array): bigint { + // Our custom check "just in case" + if (bytes.length > 8192) throw new Error('input is too large') + // For curves with nBitLength % 8 !== 0: bits2octets(bits2octets(m)) !== bits2octets(m) + // for some cases, since bytes.length * 8 is not actual bitLength. + const num = bytesToNumberBE(bytes) // check for == u8 done here + const delta = bytes.length * 8 - CURVE.nBitLength // truncate to nBitLength leftmost bits + return delta > 0 ? num >> BigInt(delta) : num + } + const bits2int_modN = + CURVE.bits2int_modN || + function (bytes: Uint8Array): bigint { + return modN(bits2int(bytes)) // can't use bytesToNumberBE here + } + // NOTE: pads output with zero as per spec + const ORDER_MASK = bitMask(CURVE.nBitLength) + /** + * Converts to bytes. Checks if num in `[0..ORDER_MASK-1]` e.g.: `[0..2^256-1]`. + */ + function int2octets(num: bigint): Uint8Array { + aInRange('num < 2^' + CURVE.nBitLength, num, _0n, ORDER_MASK) + // works with order, can have different size than numToField! + return numberToBytesBE(num, CURVE.nByteLength) + } + + // Steps A, D of RFC6979 3.2 + // Creates RFC6979 seed; converts msg/privKey to numbers. + // Used only in sign, not in verify. + // NOTE: we cannot assume here that msgHash has same amount of bytes as curve order, + // this will be invalid at least for P521. Also it can be bigger for P224 + SHA256 + function prepSig(msgHash: Hex, privateKey: PrivKey, opts = defaultSigOpts) { + if (['recovered', 'canonical'].some((k) => k in opts)) + throw new Error('sign() legacy options not supported') + const { hash, randomBytes } = CURVE + let { lowS, prehash, extraEntropy: ent } = opts // generates low-s sigs by default + if (lowS == null) lowS = true // RFC6979 3.2: we skip step A, because we already provide hash + msgHash = ensureBytes('msgHash', msgHash) + validateSigVerOpts(opts) + if (prehash) msgHash = ensureBytes('prehashed msgHash', hash(msgHash)) + + // We can't later call bits2octets, since nested bits2int is broken for curves + // with nBitLength % 8 !== 0. Because of that, we unwrap it here as int2octets call. + // const bits2octets = (bits) => int2octets(bits2int_modN(bits)) + const h1int = bits2int_modN(msgHash) + const d = normPrivateKeyToScalar(privateKey) // validate private key, convert to bigint + const seedArgs = [int2octets(d), int2octets(h1int)] + // extraEntropy. RFC6979 3.6: additional k' (optional). + if (ent != null && ent !== false) { + // K = HMAC_K(V || 0x00 || int2octets(x) || bits2octets(h1) || k') + const e = ent === true ? 
randomBytes(Fp.BYTES) : ent // generate random bytes OR pass as-is + seedArgs.push(ensureBytes('extraEntropy', e)) // check for being bytes + } + const seed = concatBytes(...seedArgs) // Step D of RFC6979 3.2 + const m = h1int // NOTE: no need to call bits2int second time here, it is inside truncateHash! + // Converts signature params into point w r/s, checks result for validity. + function k2sig(kBytes: Uint8Array): RecoveredSignature | undefined { + // RFC 6979 Section 3.2, step 3: k = bits2int(T) + const k = bits2int(kBytes) // Cannot use fields methods, since it is group element + if (!isWithinCurveOrder(k)) return // Important: all mod() calls here must be done over N + const ik = invN(k) // k^-1 mod n + const q = Point.BASE.multiply(k).toAffine() // q = Gk + const r = modN(q.x) // r = q.x mod n + if (r === _0n) return + // Can use scalar blinding b^-1(bm + bdr) where b ∈ [1,q−1] according to + // https://tches.iacr.org/index.php/TCHES/article/view/7337/6509. We've decided against it: + // a) dependency on CSPRNG b) 15% slowdown c) doesn't really help since bigints are not CT + const s = modN(ik * modN(m + r * d)) // Not using blinding here + if (s === _0n) return + let recovery = (q.x === r ? 0 : 2) | Number(q.y & _1n) // recovery bit (2 or 3, when q.x > n) + let normS = s + if (lowS && isBiggerThanHalfOrder(s)) { + normS = normalizeS(s) // if lowS was passed, ensure s is always + recovery ^= 1 // // in the bottom half of N + } + return new Signature(r, normS, recovery) as RecoveredSignature // use normS, not s + } + return { seed, k2sig } + } + const defaultSigOpts: SignOpts = { lowS: CURVE.lowS, prehash: false } + const defaultVerOpts: VerOpts = { lowS: CURVE.lowS, prehash: false } + + /** + * Signs message hash with a private key. + * ``` + * sign(m, d, k) where + * (x, y) = G × k + * r = x mod n + * s = (m + dr)/k mod n + * ``` + * @param msgHash NOT message. msg needs to be hashed to `msgHash`, or use `prehash`. + * @param privKey private key + * @param opts lowS for non-malleable sigs. extraEntropy for mixing randomness into k. prehash will hash first arg. + * @returns signature with recovery param + */ + function sign(msgHash: Hex, privKey: PrivKey, opts = defaultSigOpts): RecoveredSignature { + const { seed, k2sig } = prepSig(msgHash, privKey, opts) // Steps A, D of RFC6979 3.2. + const C = CURVE + const drbg = createHmacDrbg(C.hash.outputLen, C.nByteLength, C.hmac) + return drbg(seed, k2sig) // Steps B, C, D, E, F, G + } + + // Enable precomputes. Slows down first publicKey computation by 20ms. + Point.BASE._setWindowSize(8) + // utils.precompute(8, ProjectivePoint.BASE) + + /** + * Verifies a signature against message hash and public key. + * Rejects lowS signatures by default: to override, + * specify option `{lowS: false}`. 
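+ * Accepts a compact (2 * nByteLength bytes), DER, or { r, s } signature; the encoding is
+ * auto-detected unless `format: 'compact' | 'der'` is passed explicitly.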
Implements section 4.1.4 from https://www.secg.org/sec1-v2.pdf: + * + * ``` + * verify(r, s, h, P) where + * U1 = hs^-1 mod n + * U2 = rs^-1 mod n + * R = U1⋅G - U2⋅P + * mod(R.x, n) == r + * ``` + */ + function verify( + signature: Hex | SignatureLike, + msgHash: Hex, + publicKey: Hex, + opts = defaultVerOpts, + ): boolean { + const sg = signature + msgHash = ensureBytes('msgHash', msgHash) + publicKey = ensureBytes('publicKey', publicKey) + const { lowS, prehash, format } = opts + + // Verify opts, deduce signature format + validateSigVerOpts(opts) + if ('strict' in opts) throw new Error('options.strict was renamed to lowS') + if (format !== undefined && format !== 'compact' && format !== 'der') + throw new Error('format must be compact or der') + const isHex = typeof sg === 'string' || isBytes(sg) + const isObj = + !isHex && + !format && + typeof sg === 'object' && + sg !== null && + typeof sg.r === 'bigint' && + typeof sg.s === 'bigint' + if (!isHex && !isObj) + throw new Error('invalid signature, expected Uint8Array, hex string or Signature instance') + + let _sig: Signature | undefined = undefined + let P: ProjPointType + try { + if (isObj) _sig = new Signature(sg.r, sg.s) + if (isHex) { + // Signature can be represented in 2 ways: compact (2*nByteLength) & DER (variable-length). + // Since DER can also be 2*nByteLength bytes, we check for it first. + try { + if (format !== 'compact') _sig = Signature.fromDER(sg) + } catch (derError) { + if (!(derError instanceof DER.Err)) throw derError + } + if (!_sig && format !== 'der') _sig = Signature.fromCompact(sg) + } + P = Point.fromHex(publicKey) + } catch (error) { + return false + } + if (!_sig) return false + if (lowS && _sig.hasHighS()) return false + if (prehash) msgHash = CURVE.hash(msgHash) + const { r, s } = _sig + const h = bits2int_modN(msgHash) // Cannot use fields methods, since it is group element + const is = invN(s) // s^-1 + const u1 = modN(h * is) // u1 = hs^-1 mod n + const u2 = modN(r * is) // u2 = rs^-1 mod n + const R = Point.BASE.multiplyAndAddUnsafe(P, u1, u2)?.toAffine() // R = u1⋅G + u2⋅P + if (!R) return false + const v = modN(R.x) + return v === r + } + return { + CURVE, + getPublicKey, + getSharedSecret, + sign, + verify, + ProjectivePoint: Point, + Signature, + utils, + } +} + +/** + * Implementation of the Shallue and van de Woestijne method for any weierstrass curve. + * TODO: check if there is a way to merge this with uvRatio in Edwards; move to modular. + * b = True and y = sqrt(u / v) if (u / v) is square in F, and + * b = False and y = sqrt(Z * (u / v)) otherwise. + * @param Fp + * @param Z + * @returns + */ +export function SWUFpSqrtRatio( + Fp: IField, + Z: T, +): (u: T, v: T) => { isValid: boolean; value: T } { + // Generic implementation + const q = Fp.ORDER + let l = _0n + for (let o = q - _1n; o % _2n === _0n; o /= _2n) l += _1n + const c1 = l // 1. c1, the largest integer such that 2^c1 divides q - 1. + // We need 2n ** c1 and 2n ** (c1-1). We can't use **; but we can use <<. + // 2n ** c1 == 2n << (c1-1) + const _2n_pow_c1_1 = _2n << (c1 - _1n - _1n) + const _2n_pow_c1 = _2n_pow_c1_1 * _2n + const c2 = (q - _1n) / _2n_pow_c1 // 2. c2 = (q - 1) / (2^c1) # Integer arithmetic + const c3 = (c2 - _1n) / _2n // 3. c3 = (c2 - 1) / 2 # Integer arithmetic + const c4 = _2n_pow_c1 - _1n // 4. c4 = 2^c1 - 1 # Integer arithmetic + const c5 = _2n_pow_c1_1 // 5. c5 = 2^(c1 - 1) # Integer arithmetic + const c6 = Fp.pow(Z, c2) // 6. c6 = Z^c2 + const c7 = Fp.pow(Z, (c2 + _1n) / _2n) // 7. 
c7 = Z^((c2 + 1) / 2) + let sqrtRatio = (u: T, v: T): { isValid: boolean; value: T } => { + let tv1 = c6 // 1. tv1 = c6 + let tv2 = Fp.pow(v, c4) // 2. tv2 = v^c4 + let tv3 = Fp.sqr(tv2) // 3. tv3 = tv2^2 + tv3 = Fp.mul(tv3, v) // 4. tv3 = tv3 * v + let tv5 = Fp.mul(u, tv3) // 5. tv5 = u * tv3 + tv5 = Fp.pow(tv5, c3) // 6. tv5 = tv5^c3 + tv5 = Fp.mul(tv5, tv2) // 7. tv5 = tv5 * tv2 + tv2 = Fp.mul(tv5, v) // 8. tv2 = tv5 * v + tv3 = Fp.mul(tv5, u) // 9. tv3 = tv5 * u + let tv4 = Fp.mul(tv3, tv2) // 10. tv4 = tv3 * tv2 + tv5 = Fp.pow(tv4, c5) // 11. tv5 = tv4^c5 + let isQR = Fp.eql(tv5, Fp.ONE) // 12. isQR = tv5 == 1 + tv2 = Fp.mul(tv3, c7) // 13. tv2 = tv3 * c7 + tv5 = Fp.mul(tv4, tv1) // 14. tv5 = tv4 * tv1 + tv3 = Fp.cmov(tv2, tv3, isQR) // 15. tv3 = CMOV(tv2, tv3, isQR) + tv4 = Fp.cmov(tv5, tv4, isQR) // 16. tv4 = CMOV(tv5, tv4, isQR) + // 17. for i in (c1, c1 - 1, ..., 2): + for (let i = c1; i > _1n; i--) { + let tv5 = i - _2n // 18. tv5 = i - 2 + tv5 = _2n << (tv5 - _1n) // 19. tv5 = 2^tv5 + let tvv5 = Fp.pow(tv4, tv5) // 20. tv5 = tv4^tv5 + const e1 = Fp.eql(tvv5, Fp.ONE) // 21. e1 = tv5 == 1 + tv2 = Fp.mul(tv3, tv1) // 22. tv2 = tv3 * tv1 + tv1 = Fp.mul(tv1, tv1) // 23. tv1 = tv1 * tv1 + tvv5 = Fp.mul(tv4, tv1) // 24. tv5 = tv4 * tv1 + tv3 = Fp.cmov(tv2, tv3, e1) // 25. tv3 = CMOV(tv2, tv3, e1) + tv4 = Fp.cmov(tvv5, tv4, e1) // 26. tv4 = CMOV(tv5, tv4, e1) + } + return { isValid: isQR, value: tv3 } + } + if (Fp.ORDER % _4n === _3n) { + // sqrt_ratio_3mod4(u, v) + const c1 = (Fp.ORDER - _3n) / _4n // 1. c1 = (q - 3) / 4 # Integer arithmetic + const c2 = Fp.sqrt(Fp.neg(Z)) // 2. c2 = sqrt(-Z) + sqrtRatio = (u: T, v: T) => { + let tv1 = Fp.sqr(v) // 1. tv1 = v^2 + const tv2 = Fp.mul(u, v) // 2. tv2 = u * v + tv1 = Fp.mul(tv1, tv2) // 3. tv1 = tv1 * tv2 + let y1 = Fp.pow(tv1, c1) // 4. y1 = tv1^c1 + y1 = Fp.mul(y1, tv2) // 5. y1 = y1 * tv2 + const y2 = Fp.mul(y1, c2) // 6. y2 = y1 * c2 + const tv3 = Fp.mul(Fp.sqr(y1), v) // 7. tv3 = y1^2; 8. tv3 = tv3 * v + const isQR = Fp.eql(tv3, u) // 9. isQR = tv3 == u + let y = Fp.cmov(y2, y1, isQR) // 10. y = CMOV(y2, y1, isQR) + return { isValid: isQR, value: y } // 11. return (isQR, y) isQR ? y : y*c2 + } + } + // No curves uses that + // if (Fp.ORDER % _8n === _5n) // sqrt_ratio_5mod8 + return sqrtRatio +} +/** + * Simplified Shallue-van de Woestijne-Ulas Method + * https://www.rfc-editor.org/rfc/rfc9380#section-6.6.2 + */ +export function mapToCurveSimpleSWU( + Fp: IField, + opts: { + A: T + B: T + Z: T + }, +): (u: T) => { x: T; y: T } { + validateField(Fp) + if (!Fp.isValid(opts.A) || !Fp.isValid(opts.B) || !Fp.isValid(opts.Z)) + throw new Error('mapToCurveSimpleSWU: invalid opts') + const sqrtRatio = SWUFpSqrtRatio(Fp, opts.Z) + if (!Fp.isOdd) throw new Error('Fp.isOdd is not implemented!') + // Input: u, an element of F. + // Output: (x, y), a point on E. + return (u: T): { x: T; y: T } => { + // prettier-ignore + let tv1, tv2, tv3, tv4, tv5, tv6, x, y + tv1 = Fp.sqr(u) // 1. tv1 = u^2 + tv1 = Fp.mul(tv1, opts.Z) // 2. tv1 = Z * tv1 + tv2 = Fp.sqr(tv1) // 3. tv2 = tv1^2 + tv2 = Fp.add(tv2, tv1) // 4. tv2 = tv2 + tv1 + tv3 = Fp.add(tv2, Fp.ONE) // 5. tv3 = tv2 + 1 + tv3 = Fp.mul(tv3, opts.B) // 6. tv3 = B * tv3 + tv4 = Fp.cmov(opts.Z, Fp.neg(tv2), !Fp.eql(tv2, Fp.ZERO)) // 7. tv4 = CMOV(Z, -tv2, tv2 != 0) + tv4 = Fp.mul(tv4, opts.A) // 8. tv4 = A * tv4 + tv2 = Fp.sqr(tv3) // 9. tv2 = tv3^2 + tv6 = Fp.sqr(tv4) // 10. tv6 = tv4^2 + tv5 = Fp.mul(tv6, opts.A) // 11. tv5 = A * tv6 + tv2 = Fp.add(tv2, tv5) // 12. 
tv2 = tv2 + tv5 + tv2 = Fp.mul(tv2, tv3) // 13. tv2 = tv2 * tv3 + tv6 = Fp.mul(tv6, tv4) // 14. tv6 = tv6 * tv4 + tv5 = Fp.mul(tv6, opts.B) // 15. tv5 = B * tv6 + tv2 = Fp.add(tv2, tv5) // 16. tv2 = tv2 + tv5 + x = Fp.mul(tv1, tv3) // 17. x = tv1 * tv3 + const { isValid, value } = sqrtRatio(tv2, tv6) // 18. (is_gx1_square, y1) = sqrt_ratio(tv2, tv6) + y = Fp.mul(tv1, u) // 19. y = tv1 * u -> Z * u^3 * y1 + y = Fp.mul(y, value) // 20. y = y * y1 + x = Fp.cmov(x, tv3, isValid) // 21. x = CMOV(x, tv3, is_gx1_square) + y = Fp.cmov(y, value, isValid) // 22. y = CMOV(y, y1, is_gx1_square) + const e1 = Fp.isOdd!(u) === Fp.isOdd!(y) // 23. e1 = sgn0(u) == sgn0(y) + y = Fp.cmov(Fp.neg(y), y, e1) // 24. y = CMOV(-y, y, e1) + x = Fp.div(x, tv4) // 25. x = x / tv4 + return { x, y } + } +} diff --git a/packages/evm/src/precompiles/bls12_381/nbl/src/bls12-381.ts b/packages/evm/src/precompiles/bls12_381/nbl/src/bls12-381.ts new file mode 100644 index 00000000000..067591e0a98 --- /dev/null +++ b/packages/evm/src/precompiles/bls12_381/nbl/src/bls12-381.ts @@ -0,0 +1,776 @@ +/** + * bls12-381 is pairing-friendly Barreto-Lynn-Scott elliptic curve construction allowing to: + * * Construct zk-SNARKs at the ~120-bit security + * * Efficiently verify N aggregate signatures with 1 pairing and N ec additions: + * the Boneh-Lynn-Shacham signature scheme is orders of magnitude more efficient than Schnorr + * + * ### Summary + * 1. BLS Relies on Bilinear Pairing (expensive) + * 2. Private Keys: 32 bytes + * 3. Public Keys: 48 bytes: 381 bit affine x coordinate, encoded into 48 big-endian bytes. + * 4. Signatures: 96 bytes: two 381 bit integers (affine x coordinate), encoded into two 48 big-endian byte arrays. + * - The signature is a point on the G2 subgroup, which is defined over a finite field + * with elements twice as big as the G1 curve (G2 is over Fp2 rather than Fp. Fp2 is analogous to the + * complex numbers). + * - We also support reversed 96-byte pubkeys & 48-byte short signatures. + * 5. The 12 stands for the Embedding degree. + * + * ### Formulas + * - `P = pk x G` - public keys + * - `S = pk x H(m)` - signing + * - `e(P, H(m)) == e(G, S)` - verification using pairings + * - `e(G, S) = e(G, SUM(n)(Si)) = MUL(n)(e(G, Si))` - signature aggregation + * + * ### Compatibility and notes + * 1. It is compatible with Algorand, Chia, Dfinity, Ethereum, Filecoin, ZEC. + * Filecoin uses little endian byte arrays for private keys - make sure to reverse byte order. + * 2. Some projects use G2 for public keys and G1 for signatures. It's called "short signature". + * 3. Curve security level is about 120 bits as per [Barbulescu-Duquesne 2017](https://hal.science/hal-01534101/file/main.pdf) + * 4. Compatible with specs: + * [cfrg-pairing-friendly-curves-11](https://tools.ietf.org/html/draft-irtf-cfrg-pairing-friendly-curves-11), + * [cfrg-bls-signature-05](https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-bls-signature-05), + * RFC 9380. + * + * ### Params + * To verify curve parameters, see + * [pairing-friendly-curves spec](https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-pairing-friendly-curves-11). + * Basic math is done over finite fields over p. + * More complicated math is done over polynominal extension fields. 
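+ * For intuition: u² = −1, so Fp₂ elements multiply like complex numbers,
+ * (a + b·u)(c + d·u) = (ac − bd) + (ad + bc)·u, with coefficients reduced in Fp.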
+ * To simplify calculations in Fp12, we construct extension tower: + * + * Embedding degree (k): 12 + * Seed (X): -15132376222941642752 + * Fr: (x⁴-x²+1) + * Fp: ((x-1)² ⋅ r(x)/3+x) + * (E/Fp): Y²=X³+4 + * (Eₜ/Fp²): Y² = X³+4(u+1) (M-type twist) + * Ate loop size: X + * + * ### Towers + * - Fp₁₂ = Fp₆² => Fp₂³ + * - Fp(u) / (u² - β) where β = -1 + * - Fp₂(v) / (v³ - ξ) where ξ = u + 1 + * - Fp₆(w) / (w² - γ) where γ = v + * - Fp²[u] = Fp/u²+1 + * - Fp⁶[v] = Fp²/v³-1-u + * - Fp¹²[w] = Fp⁶/w²-v + * + * @todo construct bls & bn fp/fr from seed. + * @module + */ +/*! noble-curves - MIT License (c) 2022 Paul Miller (paulmillr.com) */ +import { sha256 } from '@noble/hashes/sha2' +import { randomBytes } from '@noble/hashes/utils' +import { type CurveFn, bls } from './abstract/bls.ts' +// Types +import { isogenyMap } from './abstract/hash-to-curve.ts' +import { Field } from './abstract/modular.ts' +import type { Fp, Fp2, Fp6, Fp12 } from './abstract/tower.ts' +import { psiFrobenius, tower12 } from './abstract/tower.ts' +import { + type Hex, + bitGet, + bitLen, + bytesToHex, + bytesToNumberBE, + concatBytes as concatB, + ensureBytes, + numberToBytesBE, +} from './abstract/utils.ts' +import { + type AffinePoint, + type ProjPointType, + mapToCurveSimpleSWU, +} from './abstract/weierstrass.ts' + +// Be friendly to bad ECMAScript parsers by not using bigint literals +// prettier-ignore +const _0n = BigInt(0), + _1n = BigInt(1), + _2n = BigInt(2), + _3n = BigInt(3), + _4n = BigInt(4) + +// The BLS parameter x (seed) for BLS12-381. NOTE: it is negative! +const BLS_X = BigInt('0xd201000000010000') +const BLS_X_LEN = bitLen(BLS_X) + +// CURVE FIELDS +const { Fp, Fp2, Fp6, Fp4Square, Fp12 } = tower12({ + // Order of Fp + ORDER: BigInt( + '0x1a0111ea397fe69a4b1ba7b6434bacd764774b84f38512bf6730d2a0f6b0f6241eabfffeb153ffffb9feffffffffaaab', + ), + // Finite extension field over irreducible polynominal. 
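+  // The [_1n, _1n] tuple below encodes ξ = u + 1 (see the Towers section above), the Fp₂
+  // non-residue used to build the next level of the tower, Fp₆ = Fp₂[v]/(v³ − ξ).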
+ // Fp(u) / (u² - β) where β = -1 + FP2_NONRESIDUE: [_1n, _1n], + Fp2mulByB: ({ c0, c1 }) => { + const t0 = Fp.mul(c0, _4n) // 4 * c0 + const t1 = Fp.mul(c1, _4n) // 4 * c1 + // (T0-T1) + (T0+T1)*i + return { c0: Fp.sub(t0, t1), c1: Fp.add(t0, t1) } + }, + // Fp12 + // A cyclotomic group is a subgroup of Fp^n defined by + // GΦₙ(p) = {α ∈ Fpⁿ : α^Φₙ(p) = 1} + // The result of any pairing is in a cyclotomic subgroup + // https://eprint.iacr.org/2009/565.pdf + Fp12cyclotomicSquare: ({ c0, c1 }): Fp12 => { + const { c0: c0c0, c1: c0c1, c2: c0c2 } = c0 + const { c0: c1c0, c1: c1c1, c2: c1c2 } = c1 + const { first: t3, second: t4 } = Fp4Square(c0c0, c1c1) + const { first: t5, second: t6 } = Fp4Square(c1c0, c0c2) + const { first: t7, second: t8 } = Fp4Square(c0c1, c1c2) + const t9 = Fp2.mulByNonresidue(t8) // T8 * (u + 1) + return { + c0: Fp6.create({ + c0: Fp2.add(Fp2.mul(Fp2.sub(t3, c0c0), _2n), t3), // 2 * (T3 - c0c0) + T3 + c1: Fp2.add(Fp2.mul(Fp2.sub(t5, c0c1), _2n), t5), // 2 * (T5 - c0c1) + T5 + c2: Fp2.add(Fp2.mul(Fp2.sub(t7, c0c2), _2n), t7), + }), // 2 * (T7 - c0c2) + T7 + c1: Fp6.create({ + c0: Fp2.add(Fp2.mul(Fp2.add(t9, c1c0), _2n), t9), // 2 * (T9 + c1c0) + T9 + c1: Fp2.add(Fp2.mul(Fp2.add(t4, c1c1), _2n), t4), // 2 * (T4 + c1c1) + T4 + c2: Fp2.add(Fp2.mul(Fp2.add(t6, c1c2), _2n), t6), + }), + } // 2 * (T6 + c1c2) + T6 + }, + Fp12cyclotomicExp(num, n) { + let z = Fp12.ONE + for (let i = BLS_X_LEN - 1; i >= 0; i--) { + z = Fp12._cyclotomicSquare(z) + if (bitGet(n, i)) z = Fp12.mul(z, num) + } + return z + }, + // https://eprint.iacr.org/2010/354.pdf + // https://eprint.iacr.org/2009/565.pdf + Fp12finalExponentiate: (num) => { + const x = BLS_X + // this^(q⁶) / this + const t0 = Fp12.div(Fp12.frobeniusMap(num, 6), num) + // t0^(q²) * t0 + const t1 = Fp12.mul(Fp12.frobeniusMap(t0, 2), t0) + const t2 = Fp12.conjugate(Fp12._cyclotomicExp(t1, x)) + const t3 = Fp12.mul(Fp12.conjugate(Fp12._cyclotomicSquare(t1)), t2) + const t4 = Fp12.conjugate(Fp12._cyclotomicExp(t3, x)) + const t5 = Fp12.conjugate(Fp12._cyclotomicExp(t4, x)) + const t6 = Fp12.mul(Fp12.conjugate(Fp12._cyclotomicExp(t5, x)), Fp12._cyclotomicSquare(t2)) + const t7 = Fp12.conjugate(Fp12._cyclotomicExp(t6, x)) + const t2_t5_pow_q2 = Fp12.frobeniusMap(Fp12.mul(t2, t5), 2) + const t4_t1_pow_q3 = Fp12.frobeniusMap(Fp12.mul(t4, t1), 3) + const t6_t1c_pow_q1 = Fp12.frobeniusMap(Fp12.mul(t6, Fp12.conjugate(t1)), 1) + const t7_t3c_t1 = Fp12.mul(Fp12.mul(t7, Fp12.conjugate(t3)), t1) + // (t2 * t5)^(q²) * (t4 * t1)^(q³) * (t6 * t1.conj)^(q^1) * t7 * t3.conj * t1 + return Fp12.mul(Fp12.mul(Fp12.mul(t2_t5_pow_q2, t4_t1_pow_q3), t6_t1c_pow_q1), t7_t3c_t1) + }, +}) + +// Finite field over r. +// This particular field is not used anywhere in bls12-381, but it is still useful. 
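+// Hedged usage sketch: Fr.create(x) reduces any bigint into [0, r), e.g.
+// const k = Fr.mul(Fr.create(a), Fr.create(b)) // (a * b) mod r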
+const Fr = Field(BigInt('0x73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000001')) + +// END OF CURVE FIELDS + +// HashToCurve + +// 3-isogeny map from E' to E https://www.rfc-editor.org/rfc/rfc9380#appendix-E.3 +const isogenyMapG2 = isogenyMap( + Fp2, + [ + // xNum + [ + [ + '0x5c759507e8e333ebb5b7a9a47d7ed8532c52d39fd3a042a88b58423c50ae15d5c2638e343d9c71c6238aaaaaaaa97d6', + '0x5c759507e8e333ebb5b7a9a47d7ed8532c52d39fd3a042a88b58423c50ae15d5c2638e343d9c71c6238aaaaaaaa97d6', + ], + [ + '0x0', + '0x11560bf17baa99bc32126fced787c88f984f87adf7ae0c7f9a208c6b4f20a4181472aaa9cb8d555526a9ffffffffc71a', + ], + [ + '0x11560bf17baa99bc32126fced787c88f984f87adf7ae0c7f9a208c6b4f20a4181472aaa9cb8d555526a9ffffffffc71e', + '0x8ab05f8bdd54cde190937e76bc3e447cc27c3d6fbd7063fcd104635a790520c0a395554e5c6aaaa9354ffffffffe38d', + ], + [ + '0x171d6541fa38ccfaed6dea691f5fb614cb14b4e7f4e810aa22d6108f142b85757098e38d0f671c7188e2aaaaaaaa5ed1', + '0x0', + ], + ], + // xDen + [ + [ + '0x0', + '0x1a0111ea397fe69a4b1ba7b6434bacd764774b84f38512bf6730d2a0f6b0f6241eabfffeb153ffffb9feffffffffaa63', + ], + [ + '0xc', + '0x1a0111ea397fe69a4b1ba7b6434bacd764774b84f38512bf6730d2a0f6b0f6241eabfffeb153ffffb9feffffffffaa9f', + ], + ['0x1', '0x0'], // LAST 1 + ], + // yNum + [ + [ + '0x1530477c7ab4113b59a4c18b076d11930f7da5d4a07f649bf54439d87d27e500fc8c25ebf8c92f6812cfc71c71c6d706', + '0x1530477c7ab4113b59a4c18b076d11930f7da5d4a07f649bf54439d87d27e500fc8c25ebf8c92f6812cfc71c71c6d706', + ], + [ + '0x0', + '0x5c759507e8e333ebb5b7a9a47d7ed8532c52d39fd3a042a88b58423c50ae15d5c2638e343d9c71c6238aaaaaaaa97be', + ], + [ + '0x11560bf17baa99bc32126fced787c88f984f87adf7ae0c7f9a208c6b4f20a4181472aaa9cb8d555526a9ffffffffc71c', + '0x8ab05f8bdd54cde190937e76bc3e447cc27c3d6fbd7063fcd104635a790520c0a395554e5c6aaaa9354ffffffffe38f', + ], + [ + '0x124c9ad43b6cf79bfbf7043de3811ad0761b0f37a1e26286b0e977c69aa274524e79097a56dc4bd9e1b371c71c718b10', + '0x0', + ], + ], + // yDen + [ + [ + '0x1a0111ea397fe69a4b1ba7b6434bacd764774b84f38512bf6730d2a0f6b0f6241eabfffeb153ffffb9feffffffffa8fb', + '0x1a0111ea397fe69a4b1ba7b6434bacd764774b84f38512bf6730d2a0f6b0f6241eabfffeb153ffffb9feffffffffa8fb', + ], + [ + '0x0', + '0x1a0111ea397fe69a4b1ba7b6434bacd764774b84f38512bf6730d2a0f6b0f6241eabfffeb153ffffb9feffffffffa9d3', + ], + [ + '0x12', + '0x1a0111ea397fe69a4b1ba7b6434bacd764774b84f38512bf6730d2a0f6b0f6241eabfffeb153ffffb9feffffffffaa99', + ], + ['0x1', '0x0'], // LAST 1 + ], + ].map((i) => i.map((pair) => Fp2.fromBigTuple(pair.map(BigInt)))) as [Fp2[], Fp2[], Fp2[], Fp2[]], +) +// 11-isogeny map from E' to E +const isogenyMapG1 = isogenyMap( + Fp, + [ + // xNum + [ + '0x11a05f2b1e833340b809101dd99815856b303e88a2d7005ff2627b56cdb4e2c85610c2d5f2e62d6eaeac1662734649b7', + '0x17294ed3e943ab2f0588bab22147a81c7c17e75b2f6a8417f565e33c70d1e86b4838f2a6f318c356e834eef1b3cb83bb', + '0xd54005db97678ec1d1048c5d10a9a1bce032473295983e56878e501ec68e25c958c3e3d2a09729fe0179f9dac9edcb0', + '0x1778e7166fcc6db74e0609d307e55412d7f5e4656a8dbf25f1b33289f1b330835336e25ce3107193c5b388641d9b6861', + '0xe99726a3199f4436642b4b3e4118e5499db995a1257fb3f086eeb65982fac18985a286f301e77c451154ce9ac8895d9', + '0x1630c3250d7313ff01d1201bf7a74ab5db3cb17dd952799b9ed3ab9097e68f90a0870d2dcae73d19cd13c1c66f652983', + '0xd6ed6553fe44d296a3726c38ae652bfb11586264f0f8ce19008e218f9c86b2a8da25128c1052ecaddd7f225a139ed84', + '0x17b81e7701abdbe2e8743884d1117e53356de5ab275b4db1a682c62ef0f2753339b7c8f8c8f475af9ccb5618e3f0c88e', + 
'0x80d3cf1f9a78fc47b90b33563be990dc43b756ce79f5574a2c596c928c5d1de4fa295f296b74e956d71986a8497e317', + '0x169b1f8e1bcfa7c42e0c37515d138f22dd2ecb803a0c5c99676314baf4bb1b7fa3190b2edc0327797f241067be390c9e', + '0x10321da079ce07e272d8ec09d2565b0dfa7dccdde6787f96d50af36003b14866f69b771f8c285decca67df3f1605fb7b', + '0x6e08c248e260e70bd1e962381edee3d31d79d7e22c837bc23c0bf1bc24c6b68c24b1b80b64d391fa9c8ba2e8ba2d229', + ], + // xDen + [ + '0x8ca8d548cff19ae18b2e62f4bd3fa6f01d5ef4ba35b48ba9c9588617fc8ac62b558d681be343df8993cf9fa40d21b1c', + '0x12561a5deb559c4348b4711298e536367041e8ca0cf0800c0126c2588c48bf5713daa8846cb026e9e5c8276ec82b3bff', + '0xb2962fe57a3225e8137e629bff2991f6f89416f5a718cd1fca64e00b11aceacd6a3d0967c94fedcfcc239ba5cb83e19', + '0x3425581a58ae2fec83aafef7c40eb545b08243f16b1655154cca8abc28d6fd04976d5243eecf5c4130de8938dc62cd8', + '0x13a8e162022914a80a6f1d5f43e7a07dffdfc759a12062bb8d6b44e833b306da9bd29ba81f35781d539d395b3532a21e', + '0xe7355f8e4e667b955390f7f0506c6e9395735e9ce9cad4d0a43bcef24b8982f7400d24bc4228f11c02df9a29f6304a5', + '0x772caacf16936190f3e0c63e0596721570f5799af53a1894e2e073062aede9cea73b3538f0de06cec2574496ee84a3a', + '0x14a7ac2a9d64a8b230b3f5b074cf01996e7f63c21bca68a81996e1cdf9822c580fa5b9489d11e2d311f7d99bbdcc5a5e', + '0xa10ecf6ada54f825e920b3dafc7a3cce07f8d1d7161366b74100da67f39883503826692abba43704776ec3a79a1d641', + '0x95fc13ab9e92ad4476d6e3eb3a56680f682b4ee96f7d03776df533978f31c1593174e4b4b7865002d6384d168ecdd0a', + '0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001', // LAST 1 + ], + // yNum + [ + '0x90d97c81ba24ee0259d1f094980dcfa11ad138e48a869522b52af6c956543d3cd0c7aee9b3ba3c2be9845719707bb33', + '0x134996a104ee5811d51036d776fb46831223e96c254f383d0f906343eb67ad34d6c56711962fa8bfe097e75a2e41c696', + '0xcc786baa966e66f4a384c86a3b49942552e2d658a31ce2c344be4b91400da7d26d521628b00523b8dfe240c72de1f6', + '0x1f86376e8981c217898751ad8746757d42aa7b90eeb791c09e4a3ec03251cf9de405aba9ec61deca6355c77b0e5f4cb', + '0x8cc03fdefe0ff135caf4fe2a21529c4195536fbe3ce50b879833fd221351adc2ee7f8dc099040a841b6daecf2e8fedb', + '0x16603fca40634b6a2211e11db8f0a6a074a7d0d4afadb7bd76505c3d3ad5544e203f6326c95a807299b23ab13633a5f0', + '0x4ab0b9bcfac1bbcb2c977d027796b3ce75bb8ca2be184cb5231413c4d634f3747a87ac2460f415ec961f8855fe9d6f2', + '0x987c8d5333ab86fde9926bd2ca6c674170a05bfe3bdd81ffd038da6c26c842642f64550fedfe935a15e4ca31870fb29', + '0x9fc4018bd96684be88c9e221e4da1bb8f3abd16679dc26c1e8b6e6a1f20cabe69d65201c78607a360370e577bdba587', + '0xe1bba7a1186bdb5223abde7ada14a23c42a0ca7915af6fe06985e7ed1e4d43b9b3f7055dd4eba6f2bafaaebca731c30', + '0x19713e47937cd1be0dfd0b8f1d43fb93cd2fcbcb6caf493fd1183e416389e61031bf3a5cce3fbafce813711ad011c132', + '0x18b46a908f36f6deb918c143fed2edcc523559b8aaf0c2462e6bfe7f911f643249d9cdf41b44d606ce07c8a4d0074d8e', + '0xb182cac101b9399d155096004f53f447aa7b12a3426b08ec02710e807b4633f06c851c1919211f20d4c04f00b971ef8', + '0x245a394ad1eca9b72fc00ae7be315dc757b3b080d4c158013e6632d3c40659cc6cf90ad1c232a6442d9d3f5db980133', + '0x5c129645e44cf1102a159f748c4a3fc5e673d81d7e86568d9ab0f5d396a7ce46ba1049b6579afb7866b1e715475224b', + '0x15e6be4e990f03ce4ea50b3b42df2eb5cb181d8f84965a3957add4fa95af01b2b665027efec01c7704b456be69c8b604', + ], + // yDen + [ + '0x16112c4c3a9c98b252181140fad0eae9601a6de578980be6eec3232b5be72e7a07f3688ef60c206d01479253b03663c1', + '0x1962d75c2381201e1a0cbd6c43c348b885c84ff731c4d59ca4a10356f453e01f78a4260763529e3532f6102c2e49a03d', + 
'0x58df3306640da276faaae7d6e8eb15778c4855551ae7f310c35a5dd279cd2eca6757cd636f96f891e2538b53dbf67f2', + '0x16b7d288798e5395f20d23bf89edb4d1d115c5dbddbcd30e123da489e726af41727364f2c28297ada8d26d98445f5416', + '0xbe0e079545f43e4b00cc912f8228ddcc6d19c9f0f69bbb0542eda0fc9dec916a20b15dc0fd2ededda39142311a5001d', + '0x8d9e5297186db2d9fb266eaac783182b70152c65550d881c5ecd87b6f0f5a6449f38db9dfa9cce202c6477faaf9b7ac', + '0x166007c08a99db2fc3ba8734ace9824b5eecfdfa8d0cf8ef5dd365bc400a0051d5fa9c01a58b1fb93d1a1399126a775c', + '0x16a3ef08be3ea7ea03bcddfabba6ff6ee5a4375efa1f4fd7feb34fd206357132b920f5b00801dee460ee415a15812ed9', + '0x1866c8ed336c61231a1be54fd1d74cc4f9fb0ce4c6af5920abc5750c4bf39b4852cfe2f7bb9248836b233d9d55535d4a', + '0x167a55cda70a6e1cea820597d94a84903216f763e13d87bb5308592e7ea7d4fbc7385ea3d529b35e346ef48bb8913f55', + '0x4d2f259eea405bd48f010a01ad2911d9c6dd039bb61a6290e591b36e636a5c871a5c29f4f83060400f8b49cba8f6aa8', + '0xaccbb67481d033ff5852c1e48c50c477f94ff8aefce42d28c0f9a88cea7913516f968986f7ebbea9684b529e2561092', + '0xad6b9514c767fe3c3613144b45f1496543346d98adf02267d5ceef9a00d9b8693000763e3b90ac11e99b138573345cc', + '0x2660400eb2e4f3b628bdd0d53cd76f2bf565b94e72927c1cb748df27942480e420517bd8714cc80d1fadc1326ed06f7', + '0xe0fa1d816ddc03e6b24255e0d7819c171c40f65e273b853324efcd6356caa205ca2f570f13497804415473a1d634b8f', + '0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001', // LAST 1 + ], + ].map((i) => i.map((j) => BigInt(j))) as [Fp[], Fp[], Fp[], Fp[]], +) + +// SWU Map - Fp2 to G2': y² = x³ + 240i * x + 1012 + 1012i +const G2_SWU = mapToCurveSimpleSWU(Fp2, { + A: Fp2.create({ c0: Fp.create(_0n), c1: Fp.create(BigInt(240)) }), // A' = 240 * I + B: Fp2.create({ c0: Fp.create(BigInt(1012)), c1: Fp.create(BigInt(1012)) }), // B' = 1012 * (1 + I) + Z: Fp2.create({ c0: Fp.create(BigInt(-2)), c1: Fp.create(BigInt(-1)) }), // Z: -(2 + I) +}) +// Optimized SWU Map - Fp to G1 +const G1_SWU = mapToCurveSimpleSWU(Fp, { + A: Fp.create( + BigInt( + '0x144698a3b8e9433d693a02c96d4982b0ea985383ee66a8d8e8981aefd881ac98936f8da0e0f97f5cf428082d584c1d', + ), + ), + B: Fp.create( + BigInt( + '0x12e2908d11688030018b12e8753eee3b2016c1f0f24f4070a0b9c14fcef35ef55a23215a316ceaa5d1cc48e98e172be0', + ), + ), + Z: Fp.create(BigInt(11)), +}) + +// Endomorphisms (for fast cofactor clearing) +// Ψ(P) endomorphism +const { G2psi, G2psi2 } = psiFrobenius(Fp, Fp2, Fp2.div(Fp2.ONE, Fp2.NONRESIDUE)) // 1/(u+1) + +// Default hash_to_field options are for hash to G2. +// +// Parameter definitions are in section 5.3 of the spec unless otherwise noted. +// Parameter values come from section 8.8.2 of the spec. 
+// https://www.rfc-editor.org/rfc/rfc9380#section-8.8.2 +// +// Base field F is GF(p^m) +// p = 0x1a0111ea397fe69a4b1ba7b6434bacd764774b84f38512bf6730d2a0f6b0f6241eabfffeb153ffffb9feffffffffaaab +// m = 2 (or 1 for G1 see section 8.8.1) +// k = 128 +const htfDefaults = Object.freeze({ + // DST: a domain separation tag + // defined in section 2.2.5 + // Use utils.getDSTLabel(), utils.setDSTLabel(value) + DST: 'BLS_SIG_BLS12381G2_XMD:SHA-256_SSWU_RO_NUL_', + encodeDST: 'BLS_SIG_BLS12381G2_XMD:SHA-256_SSWU_RO_NUL_', + // p: the characteristic of F + // where F is a finite field of characteristic p and order q = p^m + p: Fp.ORDER, + // m: the extension degree of F, m >= 1 + // where F is a finite field of characteristic p and order q = p^m + m: 2, + // k: the target security level for the suite in bits + // defined in section 5.1 + k: 128, + // option to use a message that has already been processed by + // expand_message_xmd + expand: 'xmd', + // Hash functions for: expand_message_xmd is appropriate for use with a + // wide range of hash functions, including SHA-2, SHA-3, BLAKE2, and others. + // BBS+ uses blake2: https://github.com/hyperledger/aries-framework-go/issues/2247 + hash: sha256, +} as const) + +// Encoding utils +// Point on G1 curve: (x, y) + +// Compressed point of infinity +const COMPRESSED_ZERO = setMask(Fp.toBytes(_0n), { infinity: true, compressed: true }) // set compressed & point-at-infinity bits + +function parseMask(bytes: Uint8Array) { + // Copy, so we can remove mask data. It will be removed also later, when Fp.create will call modulo. + bytes = bytes.slice() + const mask = bytes[0] & 0b1110_0000 + const compressed = !!((mask >> 7) & 1) // compression bit (0b1000_0000) + const infinity = !!((mask >> 6) & 1) // point at infinity bit (0b0100_0000) + const sort = !!((mask >> 5) & 1) // sort bit (0b0010_0000) + bytes[0] &= 0b0001_1111 // clear mask (zero first 3 bits) + return { compressed, infinity, sort, value: bytes } +} + +function setMask( + bytes: Uint8Array, + mask: { compressed?: boolean; infinity?: boolean; sort?: boolean }, +) { + if (bytes[0] & 0b1110_0000) throw new Error('setMask: non-empty mask') + if (mask.compressed) bytes[0] |= 0b1000_0000 + if (mask.infinity) bytes[0] |= 0b0100_0000 + if (mask.sort) bytes[0] |= 0b0010_0000 + return bytes +} + +function signatureG1ToRawBytes(point: ProjPointType) { + point.assertValidity() + const isZero = point.equals(bls12_381.G1.ProjectivePoint.ZERO) + const { x, y } = point.toAffine() + if (isZero) return COMPRESSED_ZERO.slice() + const P = Fp.ORDER + const sort = Boolean((y * _2n) / P) + return setMask(numberToBytesBE(x, Fp.BYTES), { compressed: true, sort }) +} + +function signatureG2ToRawBytes(point: ProjPointType) { + // NOTE: by some reasons it was missed in bls12-381, looks like bug + point.assertValidity() + const len = Fp.BYTES + if (point.equals(bls12_381.G2.ProjectivePoint.ZERO)) + return concatB(COMPRESSED_ZERO, numberToBytesBE(_0n, len)) + const { x, y } = point.toAffine() + const { re: x0, im: x1 } = Fp2.reim(x) + const { re: y0, im: y1 } = Fp2.reim(y) + const tmp = y1 > _0n ? y1 * _2n : y0 * _2n + const sort = Boolean((tmp / Fp.ORDER) & _1n) + const z2 = x0 + return concatB( + setMask(numberToBytesBE(x1, len), { sort, compressed: true }), + numberToBytesBE(z2, len), + ) +} + +/** + * bls12-381 pairing-friendly curve. 
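+ * Aggregation sketch (illustrative; priv1/priv2 are 32-byte private keys, msg is a hex message):
+ * @example
+ * const aggSig = bls.aggregateSignatures([bls.sign(msg, priv1), bls.sign(msg, priv2)]);
+ * const aggPub = bls.aggregatePublicKeys([bls.getPublicKey(priv1), bls.getPublicKey(priv2)]);
+ * bls.verify(aggSig, msg, aggPub); // true: same-message aggregation verifies against the aggregated key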
+ * @example + * import { bls12_381 as bls } from '@noble/curves/bls12-381'; + * // G1 keys, G2 signatures + * const privateKey = '67d53f170b908cabb9eb326c3c337762d59289a8fec79f7bc9254b584b73265c'; + * const message = '64726e3da8'; + * const publicKey = bls.getPublicKey(privateKey); + * const signature = bls.sign(message, privateKey); + * const isValid = bls.verify(signature, message, publicKey); + */ +export const bls12_381: CurveFn = bls({ + // Fields + fields: { + Fp, + Fp2, + Fp6, + Fp12, + Fr, + }, + // G1 is the order-q subgroup of E1(Fp) : y² = x³ + 4, #E1(Fp) = h1q, where + // characteristic; z + (z⁴ - z² + 1)(z - 1)²/3 + G1: { + Fp, + // cofactor; (z - 1)²/3 + h: BigInt('0x396c8c005555e1568c00aaab0000aaab'), + // generator's coordinates + // x = 3685416753713387016781088315183077757961620795782546409894578378688607592378376318836054947676345821548104185464507 + // y = 1339506544944476473020471379941921221584933875938349620426543736416511423956333506472724655353366534992391756441569 + Gx: BigInt( + '0x17f1d3a73197d7942695638c4fa9ac0fc3688c4f9774b905a14e3a3f171bac586c55e83ff97a1aeffb3af00adb22c6bb', + ), + Gy: BigInt( + '0x08b3f481e3aaa0f1a09e30ed741d8ae4fcf5e095d5d00af600db18cb2c04b3edd03cc744a2888ae40caa232946c5e7e1', + ), + a: Fp.ZERO, + b: _4n, + htfDefaults: { ...htfDefaults, m: 1, DST: 'BLS_SIG_BLS12381G1_XMD:SHA-256_SSWU_RO_NUL_' }, + wrapPrivateKey: true, + allowInfinityPoint: true, + // Checks is the point resides in prime-order subgroup. + // point.isTorsionFree() should return true for valid points + // It returns false for shitty points. + // https://eprint.iacr.org/2021/1130.pdf + isTorsionFree: (c, point): boolean => { + // φ endomorphism + const cubicRootOfUnityModP = BigInt( + '0x5f19672fdf76ce51ba69c6076a0f77eaddb3a93be6f89688de17d813620a00022e01fffffffefffe', + ) + const phi = new c(Fp.mul(point.px, cubicRootOfUnityModP), point.py, point.pz) + + // todo: unroll + const xP = point.multiplyUnsafe(BLS_X).negate() // [x]P + const u2P = xP.multiplyUnsafe(BLS_X) // [u2]P + return u2P.equals(phi) + + // https://eprint.iacr.org/2019/814.pdf + // (z² − 1)/3 + // const c1 = BigInt('0x396c8c005555e1560000000055555555'); + // const P = this; + // const S = P.sigma(); + // const Q = S.double(); + // const S2 = S.sigma(); + // // [(z² − 1)/3](2σ(P) − P − σ²(P)) − σ²(P) = O + // const left = Q.subtract(P).subtract(S2).multiplyUnsafe(c1); + // const C = left.subtract(S2); + // return C.isZero(); + }, + // Clear cofactor of G1 + // https://eprint.iacr.org/2019/403 + clearCofactor: (_c, point) => { + // return this.multiplyUnsafe(CURVE.h); + return point.multiplyUnsafe(BLS_X).add(point) // x*P + P + }, + mapToCurve: (scalars: bigint[]) => { + console.log('Input!', scalars, Fp.create(scalars[0])) + const { x, y } = G1_SWU(Fp.create(scalars[0])) + console.log('XVAL', x) + return isogenyMapG1(x, y) + }, + fromBytes: (bytes: Uint8Array): AffinePoint => { + const { compressed, infinity, sort, value } = parseMask(bytes) + if (value.length === 48 && compressed) { + // TODO: Fp.bytes + const P = Fp.ORDER + const compressedValue = bytesToNumberBE(value) + // Zero + const x = Fp.create(compressedValue & Fp.MASK) + if (infinity) { + if (x !== _0n) throw new Error('G1: non-empty compressed point at infinity') + return { x: _0n, y: _0n } + } + const right = Fp.add(Fp.pow(x, _3n), Fp.create(bls12_381.params.G1b)) // y² = x³ + b + let y = Fp.sqrt(right) + if (!y) throw new Error('invalid compressed G1 point') + if ((y * _2n) / P !== BigInt(sort)) y = Fp.neg(y) + return { x: Fp.create(x), y: 
Fp.create(y) } + } else if (value.length === 96 && !compressed) { + // Check if the infinity flag is set + const x = bytesToNumberBE(value.subarray(0, Fp.BYTES)) + const y = bytesToNumberBE(value.subarray(Fp.BYTES)) + if (infinity) { + if (x !== _0n || y !== _0n) throw new Error('G1: non-empty point at infinity') + return bls12_381.G1.ProjectivePoint.ZERO.toAffine() + } + return { x: Fp.create(x), y: Fp.create(y) } + } else { + throw new Error('invalid point G1, expected 48/96 bytes') + } + }, + toBytes: (c, point, isCompressed) => { + const isZero = point.equals(c.ZERO) + const { x, y } = point.toAffine() + if (isCompressed) { + if (isZero) return COMPRESSED_ZERO.slice() + const P = Fp.ORDER + const sort = Boolean((y * _2n) / P) + return setMask(numberToBytesBE(x, Fp.BYTES), { compressed: true, sort }) + } else { + if (isZero) { + // 2x PUBLIC_KEY_LENGTH + const x = concatB(new Uint8Array([0x40]), new Uint8Array(2 * Fp.BYTES - 1)) + return x + } else { + return concatB(numberToBytesBE(x, Fp.BYTES), numberToBytesBE(y, Fp.BYTES)) + } + } + }, + ShortSignature: { + fromHex(hex: Hex): ProjPointType { + const { infinity, sort, value } = parseMask(ensureBytes('signatureHex', hex, 48)) + const P = Fp.ORDER + const compressedValue = bytesToNumberBE(value) + // Zero + if (infinity) return bls12_381.G1.ProjectivePoint.ZERO + const x = Fp.create(compressedValue & Fp.MASK) + const right = Fp.add(Fp.pow(x, _3n), Fp.create(bls12_381.params.G1b)) // y² = x³ + b + let y = Fp.sqrt(right) + if (!y) throw new Error('invalid compressed G1 point') + const aflag = BigInt(sort) + if ((y * _2n) / P !== aflag) y = Fp.neg(y) + const point = bls12_381.G1.ProjectivePoint.fromAffine({ x, y }) + point.assertValidity() + return point + }, + toRawBytes(point: ProjPointType) { + return signatureG1ToRawBytes(point) + }, + toHex(point: ProjPointType) { + return bytesToHex(signatureG1ToRawBytes(point)) + }, + }, + }, + // G2 is the order-q subgroup of E2(Fp²) : y² = x³+4(1+√−1), + // where Fp2 is Fp[√−1]/(x2+1). #E2(Fp2 ) = h2q, where + // G² - 1 + // h2q + G2: { + Fp: Fp2, + // cofactor + h: BigInt( + '0x5d543a95414e7f1091d50792876a202cd91de4547085abaa68a205b2e5a7ddfa628f1cb4d9e82ef21537e293a6691ae1616ec6e786f0c70cf1c38e31c7238e5', + ), + Gx: Fp2.fromBigTuple([ + BigInt( + '0x024aa2b2f08f0a91260805272dc51051c6e47ad4fa403b02b4510b647ae3d1770bac0326a805bbefd48056c8c121bdb8', + ), + BigInt( + '0x13e02b6052719f607dacd3a088274f65596bd0d09920b61ab5da61bbdc7f5049334cf11213945d57e5ac7d055d042b7e', + ), + ]), + // y = + // 927553665492332455747201965776037880757740193453592970025027978793976877002675564980949289727957565575433344219582, + // 1985150602287291935568054521177171638300868978215655730859378665066344726373823718423869104263333984641494340347905 + Gy: Fp2.fromBigTuple([ + BigInt( + '0x0ce5d527727d6e118cc9cdc6da2e351aadfd9baa8cbdd3a76d429a695160d12c923ac9cc3baca289e193548608b82801', + ), + BigInt( + '0x0606c4a02ea734cc32acd2b02bc28b99cb3e287e85a763af267492ab572e99ab3f370d275cec1da1aaa9075ff05f79be', + ), + ]), + a: Fp2.ZERO, + b: Fp2.fromBigTuple([_4n, _4n]), + hEff: BigInt( + '0xbc69f08f2ee75b3584c6a0ea91b352888e2a8e9145ad7689986ff031508ffe1329c2f178731db956d82bf015d1212b02ec0ec69d7477c1ae954cbc06689f6a359894c0adebbf6b4e8020005aaa95551', + ), + htfDefaults: { ...htfDefaults }, + wrapPrivateKey: true, + allowInfinityPoint: true, + mapToCurve: (scalars: bigint[]) => { + const { x, y } = G2_SWU(Fp2.fromBigTuple(scalars)) + return isogenyMapG2(x, y) + }, + // Checks is the point resides in prime-order subgroup. 
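+    // Intuition: on the prime-order subgroup the ψ endomorphism acts as multiplication
+    // by the seed u = -BLS_X, so checking ψ(P) == [u]P is far cheaper than multiplying by r.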
+ // point.isTorsionFree() should return true for valid points + // It returns false for shitty points. + // https://eprint.iacr.org/2021/1130.pdf + isTorsionFree: (c, P): boolean => { + return P.multiplyUnsafe(BLS_X).negate().equals(G2psi(c, P)) // ψ(P) == [u](P) + // Older version: https://eprint.iacr.org/2019/814.pdf + // Ψ²(P) => Ψ³(P) => [z]Ψ³(P) where z = -x => [z]Ψ³(P) - Ψ²(P) + P == O + // return P.psi2().psi().mulNegX().subtract(psi2).add(P).isZero(); + }, + // Maps the point into the prime-order subgroup G2. + // clear_cofactor_bls12381_g2 from cfrg-hash-to-curve-11 + // https://eprint.iacr.org/2017/419.pdf + // prettier-ignore + clearCofactor: (c, P) => { + const x = BLS_X + let t1 = P.multiplyUnsafe(x).negate() // [-x]P + let t2 = G2psi(c, P) // Ψ(P) + let t3 = P.double() // 2P + t3 = G2psi2(c, t3) // Ψ²(2P) + t3 = t3.subtract(t2) // Ψ²(2P) - Ψ(P) + t2 = t1.add(t2) // [-x]P + Ψ(P) + t2 = t2.multiplyUnsafe(x).negate() // [x²]P - [x]Ψ(P) + t3 = t3.add(t2) // Ψ²(2P) - Ψ(P) + [x²]P - [x]Ψ(P) + t3 = t3.subtract(t1) // Ψ²(2P) - Ψ(P) + [x²]P - [x]Ψ(P) + [x]P + const Q = t3.subtract(P) // Ψ²(2P) - Ψ(P) + [x²]P - [x]Ψ(P) + [x]P - 1P + return Q // [x²-x-1]P + [x-1]Ψ(P) + Ψ²(2P) + }, + fromBytes: (bytes: Uint8Array): AffinePoint => { + const { compressed, infinity, sort, value } = parseMask(bytes) + if ( + (!compressed && !infinity && sort) || // 00100000 + (!compressed && infinity && sort) || // 01100000 + (sort && infinity && compressed) // 11100000 + ) { + throw new Error('invalid encoding flag: ' + (bytes[0] & 0b1110_0000)) + } + const L = Fp.BYTES + const slc = (b: Uint8Array, from: number, to?: number) => bytesToNumberBE(b.slice(from, to)) + if (value.length === 96 && compressed) { + const b = bls12_381.params.G2b + const P = Fp.ORDER + if (infinity) { + // check that all bytes are 0 + if (value.reduce((p, c) => (p !== 0 ? c + 1 : c), 0) > 0) { + throw new Error('invalid compressed G2 point') + } + return { x: Fp2.ZERO, y: Fp2.ZERO } + } + const x_1 = slc(value, 0, L) + const x_0 = slc(value, L, 2 * L) + const x = Fp2.create({ c0: Fp.create(x_0), c1: Fp.create(x_1) }) + const right = Fp2.add(Fp2.pow(x, _3n), b) // y² = x³ + 4 * (u+1) = x³ + b + let y = Fp2.sqrt(right) + const Y_bit = y.c1 === _0n ? (y.c0 * _2n) / P : (y.c1 * _2n) / P ? _1n : _0n + y = sort && Y_bit > 0 ? y : Fp2.neg(y) + return { x, y } + } else if (value.length === 192 && !compressed) { + if (infinity) { + if (value.reduce((p, c) => (p !== 0 ? c + 1 : c), 0) > 0) { + throw new Error('invalid uncompressed G2 point') + } + return { x: Fp2.ZERO, y: Fp2.ZERO } + } + const x1 = slc(value, 0, L) + const x0 = slc(value, L, 2 * L) + const y1 = slc(value, 2 * L, 3 * L) + const y0 = slc(value, 3 * L, 4 * L) + return { x: Fp2.fromBigTuple([x0, x1]), y: Fp2.fromBigTuple([y0, y1]) } + } else { + throw new Error('invalid point G2, expected 96/192 bytes') + } + }, + toBytes: (c, point, isCompressed) => { + const { BYTES: len, ORDER: P } = Fp + const isZero = point.equals(c.ZERO) + const { x, y } = point.toAffine() + if (isCompressed) { + if (isZero) return concatB(COMPRESSED_ZERO, numberToBytesBE(_0n, len)) + const flag = Boolean(y.c1 === _0n ? 
(y.c0 * _2n) / P : (y.c1 * _2n) / P) + return concatB( + setMask(numberToBytesBE(x.c1, len), { compressed: true, sort: flag }), + numberToBytesBE(x.c0, len), + ) + } else { + if (isZero) return concatB(new Uint8Array([0x40]), new Uint8Array(4 * len - 1)) // bytes[0] |= 1 << 6; + const { re: x0, im: x1 } = Fp2.reim(x) + const { re: y0, im: y1 } = Fp2.reim(y) + return concatB( + numberToBytesBE(x1, len), + numberToBytesBE(x0, len), + numberToBytesBE(y1, len), + numberToBytesBE(y0, len), + ) + } + }, + Signature: { + // TODO: Optimize, it's very slow because of sqrt. + fromHex(hex: Hex): ProjPointType { + const { infinity, sort, value } = parseMask(ensureBytes('signatureHex', hex)) + const P = Fp.ORDER + const half = value.length / 2 + if (half !== 48 && half !== 96) + throw new Error('invalid compressed signature length, must be 96 or 192') + const z1 = bytesToNumberBE(value.slice(0, half)) + const z2 = bytesToNumberBE(value.slice(half)) + // Indicates the infinity point + if (infinity) return bls12_381.G2.ProjectivePoint.ZERO + const x1 = Fp.create(z1 & Fp.MASK) + const x2 = Fp.create(z2) + const x = Fp2.create({ c0: x2, c1: x1 }) + const y2 = Fp2.add(Fp2.pow(x, _3n), bls12_381.params.G2b) // y² = x³ + 4 + // The slow part + let y = Fp2.sqrt(y2) + if (!y) throw new Error('Failed to find a square root') + + // Choose the y whose leftmost bit of the imaginary part is equal to the a_flag1 + // If y1 happens to be zero, then use the bit of y0 + const { re: y0, im: y1 } = Fp2.reim(y) + const aflag1 = BigInt(sort) + const isGreater = y1 > _0n && (y1 * _2n) / P !== aflag1 + const isZero = y1 === _0n && (y0 * _2n) / P !== aflag1 + if (isGreater || isZero) y = Fp2.neg(y) + const point = bls12_381.G2.ProjectivePoint.fromAffine({ x, y }) + point.assertValidity() + return point + }, + toRawBytes(point: ProjPointType) { + return signatureG2ToRawBytes(point) + }, + toHex(point: ProjPointType) { + return bytesToHex(signatureG2ToRawBytes(point)) + }, + }, + }, + params: { + ateLoopSize: BLS_X, // The BLS parameter x for BLS12-381 + r: Fr.ORDER, // order; z⁴ − z² + 1; CURVE.n from other curves + xNegative: true, + twistType: 'multiplicative', + }, + htfDefaults, + hash: sha256, + randomBytes, +}) diff --git a/packages/evm/src/precompiles/bls12_381/nbl/src/bn254.ts b/packages/evm/src/precompiles/bls12_381/nbl/src/bn254.ts new file mode 100644 index 00000000000..f2ffdb887ac --- /dev/null +++ b/packages/evm/src/precompiles/bls12_381/nbl/src/bn254.ts @@ -0,0 +1,255 @@ +/** + * bn254, previously known as alt_bn_128, when it had 128-bit security. + +Barbulescu-Duquesne 2017 shown it's weaker: just about 100 bits, +so the naming has been adjusted to its prime bit count: +https://hal.science/hal-01534101/file/main.pdf. +Compatible with EIP-196 and EIP-197. + +There are huge compatibility issues in the ecosystem: + +1. Different libraries call it in different ways: "bn254", "bn256", "alt_bn128", "bn128". +2. libff has bn128, but it's a different curve with different G2: + https://github.com/scipr-lab/libff/blob/a44f482e18b8ac04d034c193bd9d7df7817ad73f/libff/algebra/curves/bn128/bn128_init.cpp#L166-L169 +3. halo2curves bn256 is also incompatible and returns different outputs + +The goal of our implementation is to support "Ethereum" variant of the curve, +because it at least has specs: + +- EIP196 (https://eips.ethereum.org/EIPS/eip-196) describes bn254 ECADD and ECMUL opcodes for EVM +- EIP197 (https://eips.ethereum.org/EIPS/eip-197) describes bn254 pairings +- It's hard: EIPs don't have proper tests. 
EIP-197 returns boolean output instead of Fp12 +- The existing implementations are bad. Some are deprecated: + - https://github.com/paritytech/bn (old version) + - https://github.com/ewasm/ethereum-bn128.rs (uses paritytech/bn) + - https://github.com/zcash-hackworks/bn + - https://github.com/arkworks-rs/curves/blob/master/bn254/src/lib.rs +- Python implementations use different towers and produce different Fp12 outputs: + - https://github.com/ethereum/py_pairing + - https://github.com/ethereum/execution-specs/blob/master/src/ethereum/crypto/alt_bn128.py +- Points are encoded differently in different implementations + +### Params +Seed (X): 4965661367192848881 +Fr: (36x⁴+36x³+18x²+6x+1) +Fp: (36x⁴+36x³+24x²+6x+1) +(E / Fp ): Y² = X³+3 +(Et / Fp²): Y² = X³+3/(u+9) (D-type twist) +Ate loop size: 6x+2 + +### Towers +- Fp²[u] = Fp/u²+1 +- Fp⁶[v] = Fp²/v³-9-u +- Fp¹²[w] = Fp⁶/w²-v + + * @module + */ +/*! noble-curves - MIT License (c) 2022 Paul Miller (paulmillr.com) */ +import { sha256 } from '@noble/hashes/sha2' +import { randomBytes } from '@noble/hashes/utils' +import { getHash } from './_shortw_utils.ts' +import { + type CurveFn as BLSCurveFn, + type PostPrecomputeFn, + type PostPrecomputePointAddFn, + bls, +} from './abstract/bls.ts' +import { Field } from './abstract/modular.ts' +import type { Fp, Fp2, Fp6, Fp12 } from './abstract/tower.ts' +import { psiFrobenius, tower12 } from './abstract/tower.ts' +import { bitGet, bitLen, notImplemented } from './abstract/utils.ts' +import { type CurveFn, weierstrass } from './abstract/weierstrass.ts' +// prettier-ignore +const _1n = BigInt(1), + _2n = BigInt(2), + _3n = BigInt(3) +const _6n = BigInt(6) + +const BN_X = BigInt('4965661367192848881') +const BN_X_LEN = bitLen(BN_X) +const SIX_X_SQUARED = _6n * BN_X ** _2n + +// Finite field over r. It's for convenience and is not used in the code below. 
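+// Its order r equals the group order of G1/G2 (the same value bn254_weierstrass below uses as n),
+// so e.g. Fr.create(k) normalizes a scalar before multiplication.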
+const Fr = Field( + BigInt('21888242871839275222246405745257275088548364400416034343698204186575808495617'), +) +// Fp2.div(Fp2.mul(Fp2.ONE, _3n), Fp2.NONRESIDUE) +const Fp2B = { + c0: BigInt('19485874751759354771024239261021720505790618469301721065564631296452457478373'), + c1: BigInt('266929791119991161246907387137283842545076965332900288569378510910307636690'), +} + +const { Fp, Fp2, Fp6, Fp4Square, Fp12 } = tower12({ + ORDER: BigInt('21888242871839275222246405745257275088696311157297823662689037894645226208583'), + FP2_NONRESIDUE: [BigInt(9), _1n], + Fp2mulByB: (num) => Fp2.mul(num, Fp2B), + // The result of any pairing is in a cyclotomic subgroup + // https://eprint.iacr.org/2009/565.pdf + Fp12cyclotomicSquare: ({ c0, c1 }): Fp12 => { + const { c0: c0c0, c1: c0c1, c2: c0c2 } = c0 + const { c0: c1c0, c1: c1c1, c2: c1c2 } = c1 + const { first: t3, second: t4 } = Fp4Square(c0c0, c1c1) + const { first: t5, second: t6 } = Fp4Square(c1c0, c0c2) + const { first: t7, second: t8 } = Fp4Square(c0c1, c1c2) + let t9 = Fp2.mulByNonresidue(t8) // T8 * (u + 1) + return { + c0: Fp6.create({ + c0: Fp2.add(Fp2.mul(Fp2.sub(t3, c0c0), _2n), t3), // 2 * (T3 - c0c0) + T3 + c1: Fp2.add(Fp2.mul(Fp2.sub(t5, c0c1), _2n), t5), // 2 * (T5 - c0c1) + T5 + c2: Fp2.add(Fp2.mul(Fp2.sub(t7, c0c2), _2n), t7), + }), // 2 * (T7 - c0c2) + T7 + c1: Fp6.create({ + c0: Fp2.add(Fp2.mul(Fp2.add(t9, c1c0), _2n), t9), // 2 * (T9 + c1c0) + T9 + c1: Fp2.add(Fp2.mul(Fp2.add(t4, c1c1), _2n), t4), // 2 * (T4 + c1c1) + T4 + c2: Fp2.add(Fp2.mul(Fp2.add(t6, c1c2), _2n), t6), + }), + } // 2 * (T6 + c1c2) + T6 + }, + Fp12cyclotomicExp(num, n) { + let z = Fp12.ONE + for (let i = BN_X_LEN - 1; i >= 0; i--) { + z = Fp12._cyclotomicSquare(z) + if (bitGet(n, i)) z = Fp12.mul(z, num) + } + return z + }, + // https://eprint.iacr.org/2010/354.pdf + // https://eprint.iacr.org/2009/565.pdf + Fp12finalExponentiate: (num) => { + const powMinusX = (num: Fp12) => Fp12.conjugate(Fp12._cyclotomicExp(num, BN_X)) + const r0 = Fp12.mul(Fp12.conjugate(num), Fp12.inv(num)) + const r = Fp12.mul(Fp12.frobeniusMap(r0, 2), r0) + const y1 = Fp12._cyclotomicSquare(powMinusX(r)) + const y2 = Fp12.mul(Fp12._cyclotomicSquare(y1), y1) + const y4 = powMinusX(y2) + const y6 = powMinusX(Fp12._cyclotomicSquare(y4)) + const y8 = Fp12.mul(Fp12.mul(Fp12.conjugate(y6), y4), Fp12.conjugate(y2)) + const y9 = Fp12.mul(y8, y1) + return Fp12.mul( + Fp12.frobeniusMap(Fp12.mul(Fp12.conjugate(r), y9), 3), + Fp12.mul( + Fp12.frobeniusMap(y8, 2), + Fp12.mul(Fp12.frobeniusMap(y9, 1), Fp12.mul(Fp12.mul(y8, y4), r)), + ), + ) + }, +}) + +// END OF CURVE FIELDS +const { G2psi, psi } = psiFrobenius(Fp, Fp2, Fp2.NONRESIDUE) + +/* +No hashToCurve for now (and signatures): + +- RFC 9380 doesn't mention bn254 and doesn't provide test vectors +- Overall seems like nobody is using BLS signatures on top of bn254 +- Seems like it can utilize SVDW, which is not implemented yet +*/ +const htfDefaults = Object.freeze({ + // DST: a domain separation tag defined in section 2.2.5 + DST: 'BN254G2_XMD:SHA-256_SVDW_RO_', + encodeDST: 'BN254G2_XMD:SHA-256_SVDW_RO_', + p: Fp.ORDER, + m: 2, + k: 128, + expand: 'xmd', + hash: sha256, +} as const) + +export const _postPrecompute: PostPrecomputeFn = ( + Rx: Fp2, + Ry: Fp2, + Rz: Fp2, + Qx: Fp2, + Qy: Fp2, + pointAdd: PostPrecomputePointAddFn, +) => { + const q = psi(Qx, Qy) + ;({ Rx, Ry, Rz } = pointAdd(Rx, Ry, Rz, q[0], q[1])) + const q2 = psi(q[0], q[1]) + pointAdd(Rx, Ry, Rz, q2[0], Fp2.neg(q2[1])) +} + +/** + * bn254 (a.k.a. alt_bn128) pairing-friendly curve. 
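+ * A hedged EIP-197-style pairing-check sketch (A, B, C, D are illustrative curve points):
+ *   const { Fp12 } = bn254.fields
+ *   const ok = Fp12.eql(bn254.pairingBatch([{ g1: A, g2: B }, { g1: C, g2: D }]), Fp12.ONE)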
+ * Contains G1 / G2 operations and pairings. + */ +export const bn254: BLSCurveFn = bls({ + // Fields + fields: { Fp, Fp2, Fp6, Fp12, Fr }, + G1: { + Fp, + h: BigInt(1), + Gx: BigInt(1), + Gy: BigInt(2), + a: Fp.ZERO, + b: _3n, + htfDefaults: { ...htfDefaults, m: 1, DST: 'BN254G2_XMD:SHA-256_SVDW_RO_' }, + wrapPrivateKey: true, + allowInfinityPoint: true, + mapToCurve: notImplemented, + fromBytes: notImplemented, + toBytes: notImplemented, + ShortSignature: { + fromHex: notImplemented, + toRawBytes: notImplemented, + toHex: notImplemented, + }, + }, + G2: { + Fp: Fp2, + // cofactor: (36 * X^4) + (36 * X^3) + (30 * X^2) + 6*X + 1 + h: BigInt('21888242871839275222246405745257275088844257914179612981679871602714643921549'), + Gx: Fp2.fromBigTuple([ + BigInt('10857046999023057135944570762232829481370756359578518086990519993285655852781'), + BigInt('11559732032986387107991004021392285783925812861821192530917403151452391805634'), + ]), + Gy: Fp2.fromBigTuple([ + BigInt('8495653923123431417604973247489272438418190587263600148770280649306958101930'), + BigInt('4082367875863433681332203403145435568316851327593401208105741076214120093531'), + ]), + a: Fp2.ZERO, + b: Fp2B, + hEff: BigInt('21888242871839275222246405745257275088844257914179612981679871602714643921549'), + htfDefaults: { ...htfDefaults }, + wrapPrivateKey: true, + allowInfinityPoint: true, + isTorsionFree: (c, P) => P.multiplyUnsafe(SIX_X_SQUARED).equals(G2psi(c, P)), // [p]P = [6X^2]P + mapToCurve: notImplemented, + fromBytes: notImplemented, + toBytes: notImplemented, + Signature: { + fromHex: notImplemented, + toRawBytes: notImplemented, + toHex: notImplemented, + }, + }, + params: { + ateLoopSize: BN_X * _6n + _2n, + r: Fr.ORDER, + xNegative: false, + twistType: 'divisive', + }, + htfDefaults, + hash: sha256, + randomBytes, + + postPrecompute: _postPrecompute, +}) + +/** + * bn254 weierstrass curve with ECDSA. + * This is very rare and probably not used anywhere. + * Instead, you should use G1 / G2, defined above. + * @deprecated + */ +export const bn254_weierstrass: CurveFn = weierstrass({ + a: BigInt(0), + b: BigInt(3), + Fp, + n: BigInt('21888242871839275222246405745257275088548364400416034343698204186575808495617'), + Gx: BigInt(1), + Gy: BigInt(2), + h: BigInt(1), + ...getHash(sha256), +}) diff --git a/packages/evm/src/precompiles/bls12_381/nbl/src/ed25519.ts b/packages/evm/src/precompiles/bls12_381/nbl/src/ed25519.ts new file mode 100644 index 00000000000..138d05b2e04 --- /dev/null +++ b/packages/evm/src/precompiles/bls12_381/nbl/src/ed25519.ts @@ -0,0 +1,554 @@ +/** + * ed25519 Twisted Edwards curve with following addons: + * - X25519 ECDH + * - Ristretto cofactor elimination + * - Elligator hash-to-group / point indistinguishability + * @module + */ +/*! 
noble-curves - MIT License (c) 2022 Paul Miller (paulmillr.com) */ +import { sha512 } from '@noble/hashes/sha2' +import { concatBytes, randomBytes, utf8ToBytes } from '@noble/hashes/utils' +import { type AffinePoint, type Group, pippenger } from './abstract/curve.ts' +import { type CurveFn, type ExtPointType, twistedEdwards } from './abstract/edwards.ts' +import { + type HTFMethod, + type Hasher, + createHasher, + expand_message_xmd, + type htfBasicOpts, +} from './abstract/hash-to-curve.ts' +import { Field, FpSqrtEven, isNegativeLE, mod, pow2 } from './abstract/modular.ts' +import { type CurveFn as XCurveFn, montgomery } from './abstract/montgomery.ts' +import { + type Hex, + bytesToHex, + bytesToNumberLE, + ensureBytes, + equalBytes, + numberToBytesLE, +} from './abstract/utils.ts' + +// 2n**255n - 19n +const ED25519_P = BigInt( + '57896044618658097711785492504343953926634992332820282019728792003956564819949', +) +// √(-1) aka √(a) aka 2^((p-1)/4) +// Fp.sqrt(Fp.neg(1)) +const ED25519_SQRT_M1 = /* @__PURE__ */ BigInt( + '19681161376707505956807079304988542015446066515923890162744021073123829784752', +) + +// prettier-ignore +const _0n = BigInt(0), + _1n = BigInt(1), + _2n = BigInt(2), + _3n = BigInt(3) +// prettier-ignore +const _5n = BigInt(5), + _8n = BigInt(8) + +function ed25519_pow_2_252_3(x: bigint) { + // prettier-ignore + const _10n = BigInt(10), + _20n = BigInt(20), + _40n = BigInt(40), + _80n = BigInt(80) + const P = ED25519_P + const x2 = (x * x) % P + const b2 = (x2 * x) % P // x^3, 11 + const b4 = (pow2(b2, _2n, P) * b2) % P // x^15, 1111 + const b5 = (pow2(b4, _1n, P) * x) % P // x^31 + const b10 = (pow2(b5, _5n, P) * b5) % P + const b20 = (pow2(b10, _10n, P) * b10) % P + const b40 = (pow2(b20, _20n, P) * b20) % P + const b80 = (pow2(b40, _40n, P) * b40) % P + const b160 = (pow2(b80, _80n, P) * b80) % P + const b240 = (pow2(b160, _80n, P) * b80) % P + const b250 = (pow2(b240, _10n, P) * b10) % P + const pow_p_5_8 = (pow2(b250, _2n, P) * x) % P + // ^ To pow to (p+3)/8, multiply it by x. + return { pow_p_5_8, b2 } +} + +function adjustScalarBytes(bytes: Uint8Array): Uint8Array { + // Section 5: For X25519, in order to decode 32 random bytes as an integer scalar, + // set the three least significant bits of the first byte + bytes[0] &= 248 // 0b1111_1000 + // and the most significant bit of the last to zero, + bytes[31] &= 127 // 0b0111_1111 + // set the second most significant bit of the last byte to 1 + bytes[31] |= 64 // 0b0100_0000 + return bytes +} + +// sqrt(u/v) +function uvRatio(u: bigint, v: bigint): { isValid: boolean; value: bigint } { + const P = ED25519_P + const v3 = mod(v * v * v, P) // v³ + const v7 = mod(v3 * v3 * v, P) // v⁷ + // (p+3)/8 and (p-5)/8 + const pow = ed25519_pow_2_252_3(u * v7).pow_p_5_8 + let x = mod(u * v3 * pow, P) // (uv³)(uv⁷)^(p-5)/8 + const vx2 = mod(v * x * x, P) // vx² + const root1 = x // First root candidate + const root2 = mod(x * ED25519_SQRT_M1, P) // Second root candidate + const useRoot1 = vx2 === u // If vx² = u (mod p), x is a square root + const useRoot2 = vx2 === mod(-u, P) // If vx² = -u, set x <-- x * 2^((p-1)/4) + const noRoot = vx2 === mod(-u * ED25519_SQRT_M1, P) // There is no valid root, vx² = -u√(-1) + if (useRoot1) x = root1 + if (useRoot2 || noRoot) x = root2 // We return root2 anyway, for const-time + if (isNegativeLE(x, P)) x = mod(-x, P) + return { isValid: useRoot1 || useRoot2, value: x } +} + +/** Weird / bogus points, useful for debugging. 
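These are the eight small-order (torsion) points of ed25519, as 32-byte hex encodings.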
*/ +export const ED25519_TORSION_SUBGROUP: string[] = [ + '0100000000000000000000000000000000000000000000000000000000000000', + 'c7176a703d4dd84fba3c0b760d10670f2a2053fa2c39ccc64ec7fd7792ac037a', + '0000000000000000000000000000000000000000000000000000000000000080', + '26e8958fc2b227b045c3f489f2ef98f0d5dfac05d3c63339b13802886d53fc05', + 'ecffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f', + '26e8958fc2b227b045c3f489f2ef98f0d5dfac05d3c63339b13802886d53fc85', + '0000000000000000000000000000000000000000000000000000000000000000', + 'c7176a703d4dd84fba3c0b760d10670f2a2053fa2c39ccc64ec7fd7792ac03fa', +] + +const Fp = /* @__PURE__ */ (() => Field(ED25519_P, undefined, true))() + +const ed25519Defaults = /* @__PURE__ */ (() => + ({ + // Removing Fp.create() will still work, and is 10% faster on sign + a: Fp.create(BigInt(-1)), + // d is -121665/121666 a.k.a. Fp.neg(121665 * Fp.inv(121666)) + d: BigInt('37095705934669439343138083508754565189542113879843219016388785533085940283555'), + // Finite field 2n**255n - 19n + Fp, + // Subgroup order 2n**252n + 27742317777372353535851937790883648493n; + n: BigInt('7237005577332262213973186563042994240857116359379907606001950938285454250989'), + h: _8n, + Gx: BigInt('15112221349535400772501151409588531511454012693041857206046113283949847762202'), + Gy: BigInt('46316835694926478169428394003475163141307993866256225615783033603165251855960'), + hash: sha512, + randomBytes, + adjustScalarBytes, + // dom2 + // Ratio of u to v. Allows us to combine inversion and square root. Uses algo from RFC8032 5.1.3. + // Constant-time, u/√v + uvRatio, + }) as const)() + +/** + * ed25519 curve with EdDSA signatures. + * @example + * import { ed25519 } from '@noble/curves/ed25519'; + * const priv = ed25519.utils.randomPrivateKey(); + * const pub = ed25519.getPublicKey(priv); + * const msg = new TextEncoder().encode('hello'); + * const sig = ed25519.sign(msg, priv); + * ed25519.verify(sig, msg, pub); // Default mode: follows ZIP215 + * ed25519.verify(sig, msg, pub, { zip215: false }); // RFC8032 / FIPS 186-5 + */ +export const ed25519: CurveFn = /* @__PURE__ */ (() => twistedEdwards(ed25519Defaults))() + +function ed25519_domain(data: Uint8Array, ctx: Uint8Array, phflag: boolean) { + if (ctx.length > 255) throw new Error('Context is too big') + return concatBytes( + utf8ToBytes('SigEd25519 no Ed25519 collisions'), + new Uint8Array([phflag ? 1 : 0, ctx.length]), + ctx, + data, + ) +} + +export const ed25519ctx: CurveFn = /* @__PURE__ */ (() => + twistedEdwards({ + ...ed25519Defaults, + domain: ed25519_domain, + }))() +export const ed25519ph: CurveFn = /* @__PURE__ */ (() => + twistedEdwards( + Object.assign({}, ed25519Defaults, { + domain: ed25519_domain, + prehash: sha512, + }), + ))() + +/** + * ECDH using curve25519 aka x25519. 
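+ * Only Montgomery u-coordinates are used; ed25519 keys can be reused via edwardsToMontgomeryPub / edwardsToMontgomeryPriv below.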
+ * @example + * import { x25519 } from '@noble/curves/ed25519'; + * const priv = 'a546e36bf0527c9d3b16154b82465edd62144c0ac1fc5a18506a2244ba449ac4'; + * const pub = 'e6db6867583030db3594c1a424b15f7c726624ec26b3353b10a903a6d0ab1c4c'; + * x25519.getSharedSecret(priv, pub) === x25519.scalarMult(priv, pub); // aliases + * x25519.getPublicKey(priv) === x25519.scalarMultBase(priv); + * x25519.getPublicKey(x25519.utils.randomPrivateKey()); + */ +export const x25519: XCurveFn = /* @__PURE__ */ (() => + montgomery({ + P: ED25519_P, + a: BigInt(486662), + montgomeryBits: 255, // n is 253 bits + nByteLength: 32, + Gu: BigInt(9), + powPminus2: (x: bigint): bigint => { + const P = ED25519_P + // x^(p-2) aka x^(2^255-21) + const { pow_p_5_8, b2 } = ed25519_pow_2_252_3(x) + return mod(pow2(pow_p_5_8, _3n, P) * b2, P) + }, + adjustScalarBytes, + randomBytes, + }))() + +/** + * Converts ed25519 public key to x25519 public key. Uses formula: + * * `(u, v) = ((1+y)/(1-y), sqrt(-486664)*u/x)` + * * `(x, y) = (sqrt(-486664)*u/v, (u-1)/(u+1))` + * @example + * const someonesPub = ed25519.getPublicKey(ed25519.utils.randomPrivateKey()); + * const aPriv = x25519.utils.randomPrivateKey(); + * x25519.getSharedSecret(aPriv, edwardsToMontgomeryPub(someonesPub)) + */ +export function edwardsToMontgomeryPub(edwardsPub: Hex): Uint8Array { + const { y } = ed25519.ExtendedPoint.fromHex(edwardsPub) + const _1n = BigInt(1) + return Fp.toBytes(Fp.create((_1n + y) * Fp.inv(_1n - y))) +} +export const edwardsToMontgomery: typeof edwardsToMontgomeryPub = edwardsToMontgomeryPub // deprecated + +/** + * Converts ed25519 secret key to x25519 secret key. + * @example + * const someonesPub = x25519.getPublicKey(x25519.utils.randomPrivateKey()); + * const aPriv = ed25519.utils.randomPrivateKey(); + * x25519.getSharedSecret(edwardsToMontgomeryPriv(aPriv), someonesPub) + */ +export function edwardsToMontgomeryPriv(edwardsPriv: Uint8Array): Uint8Array { + const hashed = ed25519Defaults.hash(edwardsPriv.subarray(0, 32)) + return ed25519Defaults.adjustScalarBytes(hashed).subarray(0, 32) +} + +// Hash To Curve Elligator2 Map (NOTE: different from ristretto255 elligator) +// NOTE: very important part is usage of FpSqrtEven for ELL2_C1_EDWARDS, since +// SageMath returns different root first and everything falls apart + +const ELL2_C1 = /* @__PURE__ */ (() => (Fp.ORDER + _3n) / _8n)() // 1. c1 = (q + 3) / 8 # Integer arithmetic +const ELL2_C2 = /* @__PURE__ */ (() => Fp.pow(_2n, ELL2_C1))() // 2. c2 = 2^c1 +const ELL2_C3 = /* @__PURE__ */ (() => Fp.sqrt(Fp.neg(Fp.ONE)))() // 3. c3 = sqrt(-1) + +// prettier-ignore +function map_to_curve_elligator2_curve25519(u: bigint) { + const ELL2_C4 = (Fp.ORDER - _5n) / _8n // 4. c4 = (q - 5) / 8 # Integer arithmetic + const ELL2_J = BigInt(486662) + + let tv1 = Fp.sqr(u) // 1. tv1 = u^2 + tv1 = Fp.mul(tv1, _2n) // 2. tv1 = 2 * tv1 + let xd = Fp.add(tv1, Fp.ONE) // 3. xd = tv1 + 1 # Nonzero: -1 is square (mod p), tv1 is not + let x1n = Fp.neg(ELL2_J) // 4. x1n = -J # x1 = x1n / xd = -J / (1 + 2 * u^2) + let tv2 = Fp.sqr(xd) // 5. tv2 = xd^2 + let gxd = Fp.mul(tv2, xd) // 6. gxd = tv2 * xd # gxd = xd^3 + let gx1 = Fp.mul(tv1, ELL2_J) // 7. gx1 = J * tv1 # x1n + J * xd + gx1 = Fp.mul(gx1, x1n) // 8. gx1 = gx1 * x1n # x1n^2 + J * x1n * xd + gx1 = Fp.add(gx1, tv2) // 9. gx1 = gx1 + tv2 # x1n^2 + J * x1n * xd + xd^2 + gx1 = Fp.mul(gx1, x1n) // 10. gx1 = gx1 * x1n # x1n^3 + J * x1n^2 * xd + x1n * xd^2 + let tv3 = Fp.sqr(gxd) // 11. tv3 = gxd^2 + tv2 = Fp.sqr(tv3) // 12. 
tv2 = tv3^2 # gxd^4 + tv3 = Fp.mul(tv3, gxd) // 13. tv3 = tv3 * gxd # gxd^3 + tv3 = Fp.mul(tv3, gx1) // 14. tv3 = tv3 * gx1 # gx1 * gxd^3 + tv2 = Fp.mul(tv2, tv3) // 15. tv2 = tv2 * tv3 # gx1 * gxd^7 + let y11 = Fp.pow(tv2, ELL2_C4) // 16. y11 = tv2^c4 # (gx1 * gxd^7)^((p - 5) / 8) + y11 = Fp.mul(y11, tv3) // 17. y11 = y11 * tv3 # gx1*gxd^3*(gx1*gxd^7)^((p-5)/8) + let y12 = Fp.mul(y11, ELL2_C3) // 18. y12 = y11 * c3 + tv2 = Fp.sqr(y11) // 19. tv2 = y11^2 + tv2 = Fp.mul(tv2, gxd) // 20. tv2 = tv2 * gxd + let e1 = Fp.eql(tv2, gx1) // 21. e1 = tv2 == gx1 + let y1 = Fp.cmov(y12, y11, e1) // 22. y1 = CMOV(y12, y11, e1) # If g(x1) is square, this is its sqrt + let x2n = Fp.mul(x1n, tv1) // 23. x2n = x1n * tv1 # x2 = x2n / xd = 2 * u^2 * x1n / xd + let y21 = Fp.mul(y11, u) // 24. y21 = y11 * u + y21 = Fp.mul(y21, ELL2_C2) // 25. y21 = y21 * c2 + let y22 = Fp.mul(y21, ELL2_C3) // 26. y22 = y21 * c3 + let gx2 = Fp.mul(gx1, tv1) // 27. gx2 = gx1 * tv1 # g(x2) = gx2 / gxd = 2 * u^2 * g(x1) + tv2 = Fp.sqr(y21) // 28. tv2 = y21^2 + tv2 = Fp.mul(tv2, gxd) // 29. tv2 = tv2 * gxd + let e2 = Fp.eql(tv2, gx2) // 30. e2 = tv2 == gx2 + let y2 = Fp.cmov(y22, y21, e2) // 31. y2 = CMOV(y22, y21, e2) # If g(x2) is square, this is its sqrt + tv2 = Fp.sqr(y1) // 32. tv2 = y1^2 + tv2 = Fp.mul(tv2, gxd) // 33. tv2 = tv2 * gxd + let e3 = Fp.eql(tv2, gx1) // 34. e3 = tv2 == gx1 + let xn = Fp.cmov(x2n, x1n, e3) // 35. xn = CMOV(x2n, x1n, e3) # If e3, x = x1, else x = x2 + let y = Fp.cmov(y2, y1, e3) // 36. y = CMOV(y2, y1, e3) # If e3, y = y1, else y = y2 + let e4 = Fp.isOdd(y) // 37. e4 = sgn0(y) == 1 # Fix sign of y + y = Fp.cmov(y, Fp.neg(y), e3 !== e4) // 38. y = CMOV(y, -y, e3 XOR e4) + return { xMn: xn, xMd: xd, yMn: y, yMd: _1n } // 39. return (xn, xd, y, 1) +} + +const ELL2_C1_EDWARDS = /* @__PURE__ */ (() => FpSqrtEven(Fp, Fp.neg(BigInt(486664))))() // sgn0(c1) MUST equal 0 +function map_to_curve_elligator2_edwards25519(u: bigint) { + const { xMn, xMd, yMn, yMd } = map_to_curve_elligator2_curve25519(u) // 1. (xMn, xMd, yMn, yMd) = + // map_to_curve_elligator2_curve25519(u) + let xn = Fp.mul(xMn, yMd) // 2. xn = xMn * yMd + xn = Fp.mul(xn, ELL2_C1_EDWARDS) // 3. xn = xn * c1 + let xd = Fp.mul(xMd, yMn) // 4. xd = xMd * yMn # xn / xd = c1 * xM / yM + let yn = Fp.sub(xMn, xMd) // 5. yn = xMn - xMd + let yd = Fp.add(xMn, xMd) // 6. yd = xMn + xMd # (n / d - 1) / (n / d + 1) = (n - d) / (n + d) + let tv1 = Fp.mul(xd, yd) // 7. tv1 = xd * yd + let e = Fp.eql(tv1, Fp.ZERO) // 8. e = tv1 == 0 + xn = Fp.cmov(xn, Fp.ZERO, e) // 9. xn = CMOV(xn, 0, e) + xd = Fp.cmov(xd, Fp.ONE, e) // 10. xd = CMOV(xd, 1, e) + yn = Fp.cmov(yn, Fp.ONE, e) // 11. yn = CMOV(yn, 1, e) + yd = Fp.cmov(yd, Fp.ONE, e) // 12. yd = CMOV(yd, 1, e) + + const inv = Fp.invertBatch([xd, yd]) // batch division + return { x: Fp.mul(xn, inv[0]), y: Fp.mul(yn, inv[1]) } // 13. 
return (xn, xd, yn, yd) +} + +export const ed25519_hasher: Hasher = /* @__PURE__ */ (() => + createHasher( + ed25519.ExtendedPoint, + (scalars: bigint[]) => map_to_curve_elligator2_edwards25519(scalars[0]), + { + DST: 'edwards25519_XMD:SHA-512_ELL2_RO_', + encodeDST: 'edwards25519_XMD:SHA-512_ELL2_NU_', + p: Fp.ORDER, + m: 1, + k: 128, + expand: 'xmd', + hash: sha512, + }, + ))() +/** + * @deprecated Use `ed25519_hasher` + */ +export const hashToCurve: HTFMethod = /* @__PURE__ */ (() => ed25519_hasher.hashToCurve)() +/** + * @deprecated Use `ed25519_hasher` + */ +export const encodeToCurve: HTFMethod = /* @__PURE__ */ (() => + ed25519_hasher.encodeToCurve)() + +function aristp(other: unknown) { + if (!(other instanceof RistPoint)) throw new Error('RistrettoPoint expected') +} + +// √(-1) aka √(a) aka 2^((p-1)/4) +const SQRT_M1 = ED25519_SQRT_M1 +// √(ad - 1) +const SQRT_AD_MINUS_ONE = /* @__PURE__ */ BigInt( + '25063068953384623474111414158702152701244531502492656460079210482610430750235', +) +// 1 / √(a-d) +const INVSQRT_A_MINUS_D = /* @__PURE__ */ BigInt( + '54469307008909316920995813868745141605393597292927456921205312896311721017578', +) +// 1-d² +const ONE_MINUS_D_SQ = /* @__PURE__ */ BigInt( + '1159843021668779879193775521855586647937357759715417654439879720876111806838', +) +// (d-1)² +const D_MINUS_ONE_SQ = /* @__PURE__ */ BigInt( + '40440834346308536858101042469323190826248399146238708352240133220865137265952', +) +// Calculates 1/√(number) +const invertSqrt = (number: bigint) => uvRatio(_1n, number) + +const MAX_255B = /* @__PURE__ */ BigInt( + '0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff', +) +const bytes255ToNumberLE = (bytes: Uint8Array) => + ed25519.CURVE.Fp.create(bytesToNumberLE(bytes) & MAX_255B) + +type ExtendedPoint = ExtPointType + +// Computes Elligator map for Ristretto +// https://ristretto.group/formulas/elligator.html +function calcElligatorRistrettoMap(r0: bigint): ExtendedPoint { + const { d } = ed25519.CURVE + const P = ed25519.CURVE.Fp.ORDER + const mod = ed25519.CURVE.Fp.create + const r = mod(SQRT_M1 * r0 * r0) // 1 + const Ns = mod((r + _1n) * ONE_MINUS_D_SQ) // 2 + let c = BigInt(-1) // 3 + const D = mod((c - d * r) * mod(r + d)) // 4 + let { isValid: Ns_D_is_sq, value: s } = uvRatio(Ns, D) // 5 + let s_ = mod(s * r0) // 6 + if (!isNegativeLE(s_, P)) s_ = mod(-s_) + if (!Ns_D_is_sq) s = s_ // 7 + if (!Ns_D_is_sq) c = r // 8 + const Nt = mod(c * (r - _1n) * D_MINUS_ONE_SQ - D) // 9 + const s2 = s * s + const W0 = mod((s + s) * D) // 10 + const W1 = mod(Nt * SQRT_AD_MINUS_ONE) // 11 + const W2 = mod(_1n - s2) // 12 + const W3 = mod(_1n + s2) // 13 + return new ed25519.ExtendedPoint(mod(W0 * W3), mod(W2 * W1), mod(W1 * W3), mod(W0 * W2)) +} + +/** + * Each ed25519/ExtendedPoint has 8 different equivalent points. This can be + * a source of bugs for protocols like ring signatures. Ristretto was created to solve this. + * Ristretto point operates in X:Y:Z:T extended coordinates like ExtendedPoint, + * but it should work in its own namespace: do not combine those two. + * https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-ristretto255-decaf448 + */ +class RistPoint implements Group { + static BASE: RistPoint + static ZERO: RistPoint + private readonly ep: ExtendedPoint + // Private property to discourage combining ExtendedPoint + RistrettoPoint + // Always use Ristretto encoding/decoding instead. 
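+  // Illustrative usage (editor's sketch, not part of the upstream file); msg is any Uint8Array:
+  //   import { sha512 } from '@noble/hashes/sha2'
+  //   const P = RistrettoPoint.hashToCurve(sha512(msg)) // 64 uniform bytes -> group element
+  //   const hex = P.toHex()                             // canonical 32-byte encoding
+  //   RistrettoPoint.fromHex(hex).equals(P)             // true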
+ constructor(ep: ExtendedPoint) { + this.ep = ep + } + + static fromAffine(ap: AffinePoint): RistPoint { + return new RistPoint(ed25519.ExtendedPoint.fromAffine(ap)) + } + + /** + * Takes uniform output of 64-byte hash function like sha512 and converts it to `RistrettoPoint`. + * The hash-to-group operation applies Elligator twice and adds the results. + * **Note:** this is one-way map, there is no conversion from point to hash. + * https://ristretto.group/formulas/elligator.html + * @param hex 64-byte output of a hash function + */ + static hashToCurve(hex: Hex): RistPoint { + hex = ensureBytes('ristrettoHash', hex, 64) + const r1 = bytes255ToNumberLE(hex.slice(0, 32)) + const R1 = calcElligatorRistrettoMap(r1) + const r2 = bytes255ToNumberLE(hex.slice(32, 64)) + const R2 = calcElligatorRistrettoMap(r2) + return new RistPoint(R1.add(R2)) + } + + /** + * Converts ristretto-encoded string to ristretto point. + * https://ristretto.group/formulas/decoding.html + * @param hex Ristretto-encoded 32 bytes. Not every 32-byte string is valid ristretto encoding + */ + static fromHex(hex: Hex): RistPoint { + hex = ensureBytes('ristrettoHex', hex, 32) + const { a, d } = ed25519.CURVE + const P = ed25519.CURVE.Fp.ORDER + const mod = ed25519.CURVE.Fp.create + const emsg = 'RistrettoPoint.fromHex: the hex is not valid encoding of RistrettoPoint' + const s = bytes255ToNumberLE(hex) + // 1. Check that s_bytes is the canonical encoding of a field element, or else abort. + // 3. Check that s is non-negative, or else abort + if (!equalBytes(numberToBytesLE(s, 32), hex) || isNegativeLE(s, P)) throw new Error(emsg) + const s2 = mod(s * s) + const u1 = mod(_1n + a * s2) // 4 (a is -1) + const u2 = mod(_1n - a * s2) // 5 + const u1_2 = mod(u1 * u1) + const u2_2 = mod(u2 * u2) + const v = mod(a * d * u1_2 - u2_2) // 6 + const { isValid, value: I } = invertSqrt(mod(v * u2_2)) // 7 + const Dx = mod(I * u2) // 8 + const Dy = mod(I * Dx * v) // 9 + let x = mod((s + s) * Dx) // 10 + if (isNegativeLE(x, P)) x = mod(-x) // 10 + const y = mod(u1 * Dy) // 11 + const t = mod(x * y) // 12 + if (!isValid || isNegativeLE(t, P) || y === _0n) throw new Error(emsg) + return new RistPoint(new ed25519.ExtendedPoint(x, y, _1n, t)) + } + + static msm(points: RistPoint[], scalars: bigint[]): RistPoint { + const Fn = Field(ed25519.CURVE.n, ed25519.CURVE.nBitLength) + return pippenger(RistPoint, Fn, points, scalars) + } + + /** + * Encodes ristretto point to Uint8Array. + * https://ristretto.group/formulas/encoding.html + */ + toRawBytes(): Uint8Array { + let { ex: x, ey: y, ez: z, et: t } = this.ep + const P = ed25519.CURVE.Fp.ORDER + const mod = ed25519.CURVE.Fp.create + const u1 = mod(mod(z + y) * mod(z - y)) // 1 + const u2 = mod(x * y) // 2 + // Square root always exists + const u2sq = mod(u2 * u2) + const { value: invsqrt } = invertSqrt(mod(u1 * u2sq)) // 3 + const D1 = mod(invsqrt * u1) // 4 + const D2 = mod(invsqrt * u2) // 5 + const zInv = mod(D1 * D2 * t) // 6 + let D: bigint // 7 + if (isNegativeLE(t * zInv, P)) { + let _x = mod(y * SQRT_M1) + let _y = mod(x * SQRT_M1) + x = _x + y = _y + D = mod(D1 * INVSQRT_A_MINUS_D) + } else { + D = D2 // 8 + } + if (isNegativeLE(x * zInv, P)) y = mod(-y) // 9 + let s = mod((z - y) * D) // 10 (check footer's note, no sqrt(-a)) + if (isNegativeLE(s, P)) s = mod(-s) + return numberToBytesLE(s, 32) // 11 + } + + toHex(): string { + return bytesToHex(this.toRawBytes()) + } + + toString(): string { + return this.toHex() + } + + // Compare one point to another. 
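+  // Representatives are equal iff x1*y2 == y1*x2 or y1*y2 == x1*x2, which collapses the 8
+  // equivalent ed25519 points of a ristretto255 element into one equality class.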
+ equals(other: RistPoint): boolean { + aristp(other) + const { ex: X1, ey: Y1 } = this.ep + const { ex: X2, ey: Y2 } = other.ep + const mod = ed25519.CURVE.Fp.create + // (x1 * y2 == y1 * x2) | (y1 * y2 == x1 * x2) + const one = mod(X1 * Y2) === mod(Y1 * X2) + const two = mod(Y1 * Y2) === mod(X1 * X2) + return one || two + } + + add(other: RistPoint): RistPoint { + aristp(other) + return new RistPoint(this.ep.add(other.ep)) + } + + subtract(other: RistPoint): RistPoint { + aristp(other) + return new RistPoint(this.ep.subtract(other.ep)) + } + + multiply(scalar: bigint): RistPoint { + return new RistPoint(this.ep.multiply(scalar)) + } + + multiplyUnsafe(scalar: bigint): RistPoint { + return new RistPoint(this.ep.multiplyUnsafe(scalar)) + } + + double(): RistPoint { + return new RistPoint(this.ep.double()) + } + + negate(): RistPoint { + return new RistPoint(this.ep.negate()) + } +} +export const RistrettoPoint: typeof RistPoint = /* @__PURE__ */ (() => { + if (!RistPoint.BASE) RistPoint.BASE = new RistPoint(ed25519.ExtendedPoint.BASE) + if (!RistPoint.ZERO) RistPoint.ZERO = new RistPoint(ed25519.ExtendedPoint.ZERO) + return RistPoint +})() + +// Hashing to ristretto255. https://www.rfc-editor.org/rfc/rfc9380#appendix-B +export const hashToRistretto255 = (msg: Uint8Array, options: htfBasicOpts): RistPoint => { + const d = options.DST + const DST = typeof d === 'string' ? utf8ToBytes(d) : d + const uniform_bytes = expand_message_xmd(msg, DST, 64, sha512) + const P = RistPoint.hashToCurve(uniform_bytes) + return P +} +/** @deprecated */ +export const hash_to_ristretto255: (msg: Uint8Array, options: htfBasicOpts) => RistPoint = + hashToRistretto255 // legacy diff --git a/packages/evm/src/precompiles/bls12_381/nbl/src/ed448.ts b/packages/evm/src/precompiles/bls12_381/nbl/src/ed448.ts new file mode 100644 index 00000000000..c39163373f2 --- /dev/null +++ b/packages/evm/src/precompiles/bls12_381/nbl/src/ed448.ts @@ -0,0 +1,522 @@ +/** + * Edwards448 (not Ed448-Goldilocks) curve with following addons: + * - X448 ECDH + * - Decaf cofactor elimination + * - Elligator hash-to-group / point indistinguishability + * Conforms to RFC 8032 https://www.rfc-editor.org/rfc/rfc8032.html#section-5.2 + * @module + */ +/*! 
noble-curves - MIT License (c) 2022 Paul Miller (paulmillr.com) */ +import { shake256 } from '@noble/hashes/sha3' +import { concatBytes, randomBytes, utf8ToBytes, wrapConstructor } from '@noble/hashes/utils' +import type { AffinePoint, Group } from './abstract/curve.ts' +import { pippenger } from './abstract/curve.ts' +import { type CurveFn, type ExtPointType, twistedEdwards } from './abstract/edwards.ts' +import { + type HTFMethod, + type Hasher, + createHasher, + expand_message_xof, + type htfBasicOpts, +} from './abstract/hash-to-curve.ts' +import { Field, isNegativeLE, mod, pow2 } from './abstract/modular.ts' +import { type CurveFn as XCurveFn, montgomery } from './abstract/montgomery.ts' +import { + type Hex, + bytesToHex, + bytesToNumberLE, + ensureBytes, + equalBytes, + numberToBytesLE, +} from './abstract/utils.ts' + +const shake256_114 = wrapConstructor(() => shake256.create({ dkLen: 114 })) +const shake256_64 = wrapConstructor(() => shake256.create({ dkLen: 64 })) +const ed448P = BigInt( + '726838724295606890549323807888004534353641360687318060281490199180612328166730772686396383698676545930088884461843637361053498018365439', +) + +// prettier-ignore +const _1n = BigInt(1), + _2n = BigInt(2), + _3n = BigInt(3), + _4n = BigInt(4), + _11n = BigInt(11) +// prettier-ignore +const _22n = BigInt(22), + _44n = BigInt(44), + _88n = BigInt(88), + _223n = BigInt(223) + +// powPminus3div4 calculates z = x^k mod p, where k = (p-3)/4. +// Used for efficient square root calculation. +// ((P-3)/4).toString(2) would produce bits [223x 1, 0, 222x 1] +function ed448_pow_Pminus3div4(x: bigint): bigint { + const P = ed448P + const b2 = (x * x * x) % P + const b3 = (b2 * b2 * x) % P + const b6 = (pow2(b3, _3n, P) * b3) % P + const b9 = (pow2(b6, _3n, P) * b3) % P + const b11 = (pow2(b9, _2n, P) * b2) % P + const b22 = (pow2(b11, _11n, P) * b11) % P + const b44 = (pow2(b22, _22n, P) * b22) % P + const b88 = (pow2(b44, _44n, P) * b44) % P + const b176 = (pow2(b88, _88n, P) * b88) % P + const b220 = (pow2(b176, _44n, P) * b44) % P + const b222 = (pow2(b220, _2n, P) * b2) % P + const b223 = (pow2(b222, _1n, P) * x) % P + return (pow2(b223, _223n, P) * b222) % P +} + +function adjustScalarBytes(bytes: Uint8Array): Uint8Array { + // Section 5: Likewise, for X448, set the two least significant bits of the first byte to 0, and the most + // significant bit of the last byte to 1. + bytes[0] &= 252 // 0b11111100 + // and the most significant bit of the last byte to 1. + bytes[55] |= 128 // 0b10000000 + // NOTE: this is a NOOP for 56-byte scalars (X25519/X448) + bytes[56] = 0 // Byte outside of group (456 bits vs 448 bits) + return bytes +} + +// Constant-time ratio of u to v. Allows combining inversion and square root u/√v. +// Uses algo from RFC8032 5.1.3. +function uvRatio(u: bigint, v: bigint): { isValid: boolean; value: bigint } { + const P = ed448P + // https://www.rfc-editor.org/rfc/rfc8032#section-5.2.3 + // To compute the square root of (u/v), the first step is to compute the + // candidate root x = (u/v)^((p+1)/4).
This can be done using the + // following trick, to use a single modular powering for both the + // inversion of v and the square root: + // x = (u/v)^((p+1)/4) = u³v(u⁵v³)^((p-3)/4) (mod p) + const u2v = mod(u * u * v, P) // u²v + const u3v = mod(u2v * u, P) // u³v + const u5v3 = mod(u3v * u2v * v, P) // u⁵v³ + const root = ed448_pow_Pminus3div4(u5v3) + const x = mod(u3v * root, P) + // Verify that root is exists + const x2 = mod(x * x, P) // x² + // If vx² = u, the recovered x-coordinate is x. Otherwise, no + // square root exists, and the decoding fails. + return { isValid: mod(x2 * v, P) === u, value: x } +} + +const Fp = Field(ed448P, 456, true) + +const ED448_DEF = { + // Param: a + a: BigInt(1), + // -39081 a.k.a. Fp.neg(39081) + d: BigInt( + '726838724295606890549323807888004534353641360687318060281490199180612328166730772686396383698676545930088884461843637361053498018326358', + ), + // Finite field 2n**448n - 2n**224n - 1n + Fp, + // Subgroup order + // 2n**446n - 13818066809895115352007386748515426880336692474882178609894547503885n + n: BigInt( + '181709681073901722637330951972001133588410340171829515070372549795146003961539585716195755291692375963310293709091662304773755859649779', + ), + // RFC 7748 has 56-byte keys, RFC 8032 has 57-byte keys + nBitLength: 456, + h: BigInt(4), + Gx: BigInt( + '224580040295924300187604334099896036246789641632564134246125461686950415467406032909029192869357953282578032075146446173674602635247710', + ), + Gy: BigInt( + '298819210078481492676017930443930673437544040154080242095928241372331506189835876003536878655418784733982303233503462500531545062832660', + ), + // SHAKE256(dom4(phflag,context)||x, 114) + hash: shake256_114, + randomBytes, + adjustScalarBytes, + // dom4 + domain: (data: Uint8Array, ctx: Uint8Array, phflag: boolean) => { + if (ctx.length > 255) throw new Error('context must be smaller than 255, got: ' + ctx.length) + return concatBytes( + utf8ToBytes('SigEd448'), + new Uint8Array([phflag ? 1 : 0, ctx.length]), + ctx, + data, + ) + }, + uvRatio, +} as const + +/** + * ed448 EdDSA curve and methods. + * @example + * import { ed448 } from '@noble/curves/ed448'; + * const priv = ed448.utils.randomPrivateKey(); + * const pub = ed448.getPublicKey(priv); + * const msg = new TextEncoder().encode('whatsup'); + * const sig = ed448.sign(msg, priv); + * ed448.verify(sig, msg, pub); + */ +export const ed448: CurveFn = /* @__PURE__ */ twistedEdwards(ED448_DEF) +// NOTE: there is no ed448ctx, since ed448 supports ctx by default +export const ed448ph: CurveFn = /* @__PURE__ */ twistedEdwards({ + ...ED448_DEF, + prehash: shake256_64, +}) + +/** + * ECDH using curve448 aka x448. + */ +export const x448: XCurveFn = /* @__PURE__ */ (() => + montgomery({ + a: BigInt(156326), + // RFC 7748 has 56-byte keys, RFC 8032 has 57-byte keys + montgomeryBits: 448, + nByteLength: 56, + P: ed448P, + Gu: BigInt(5), + powPminus2: (x: bigint): bigint => { + const P = ed448P + const Pminus3div4 = ed448_pow_Pminus3div4(x) + const Pminus3 = pow2(Pminus3div4, BigInt(2), P) + return mod(Pminus3 * x, P) // Pminus3 * x = Pminus2 + }, + adjustScalarBytes, + randomBytes, + }))() + +/** + * Converts edwards448 public key to x448 public key. 
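Only the Montgomery u-coordinate is returned.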
Uses formula: + * * `(u, v) = ((y-1)/(y+1), sqrt(156324)*u/x)` + * * `(x, y) = (sqrt(156324)*u/v, (1+u)/(1-u))` + * @example + * const aPub = ed448.getPublicKey(utils.randomPrivateKey()); + * x448.getSharedSecret(edwardsToMontgomery(aPub), edwardsToMontgomery(someonesPub)) + */ +export function edwardsToMontgomeryPub(edwardsPub: string | Uint8Array): Uint8Array { + const { y } = ed448.ExtendedPoint.fromHex(edwardsPub) + const _1n = BigInt(1) + return Fp.toBytes(Fp.create((y - _1n) * Fp.inv(y + _1n))) +} + +export const edwardsToMontgomery: typeof edwardsToMontgomeryPub = edwardsToMontgomeryPub // deprecated +// TODO: add edwardsToMontgomeryPriv, similar to ed25519 version + +// Hash To Curve Elligator2 Map +const ELL2_C1 = (Fp.ORDER - BigInt(3)) / BigInt(4) // 1. c1 = (q - 3) / 4 # Integer arithmetic +const ELL2_J = BigInt(156326) + +function map_to_curve_elligator2_curve448(u: bigint) { + let tv1 = Fp.sqr(u) // 1. tv1 = u^2 + let e1 = Fp.eql(tv1, Fp.ONE) // 2. e1 = tv1 == 1 + tv1 = Fp.cmov(tv1, Fp.ZERO, e1) // 3. tv1 = CMOV(tv1, 0, e1) # If Z * u^2 == -1, set tv1 = 0 + let xd = Fp.sub(Fp.ONE, tv1) // 4. xd = 1 - tv1 + let x1n = Fp.neg(ELL2_J) // 5. x1n = -J + let tv2 = Fp.sqr(xd) // 6. tv2 = xd^2 + let gxd = Fp.mul(tv2, xd) // 7. gxd = tv2 * xd # gxd = xd^3 + let gx1 = Fp.mul(tv1, Fp.neg(ELL2_J)) // 8. gx1 = -J * tv1 # x1n + J * xd + gx1 = Fp.mul(gx1, x1n) // 9. gx1 = gx1 * x1n # x1n^2 + J * x1n * xd + gx1 = Fp.add(gx1, tv2) // 10. gx1 = gx1 + tv2 # x1n^2 + J * x1n * xd + xd^2 + gx1 = Fp.mul(gx1, x1n) // 11. gx1 = gx1 * x1n # x1n^3 + J * x1n^2 * xd + x1n * xd^2 + let tv3 = Fp.sqr(gxd) // 12. tv3 = gxd^2 + tv2 = Fp.mul(gx1, gxd) // 13. tv2 = gx1 * gxd # gx1 * gxd + tv3 = Fp.mul(tv3, tv2) // 14. tv3 = tv3 * tv2 # gx1 * gxd^3 + let y1 = Fp.pow(tv3, ELL2_C1) // 15. y1 = tv3^c1 # (gx1 * gxd^3)^((p - 3) / 4) + y1 = Fp.mul(y1, tv2) // 16. y1 = y1 * tv2 # gx1 * gxd * (gx1 * gxd^3)^((p - 3) / 4) + let x2n = Fp.mul(x1n, Fp.neg(tv1)) // 17. x2n = -tv1 * x1n # x2 = x2n / xd = -1 * u^2 * x1n / xd + let y2 = Fp.mul(y1, u) // 18. y2 = y1 * u + y2 = Fp.cmov(y2, Fp.ZERO, e1) // 19. y2 = CMOV(y2, 0, e1) + tv2 = Fp.sqr(y1) // 20. tv2 = y1^2 + tv2 = Fp.mul(tv2, gxd) // 21. tv2 = tv2 * gxd + let e2 = Fp.eql(tv2, gx1) // 22. e2 = tv2 == gx1 + let xn = Fp.cmov(x2n, x1n, e2) // 23. xn = CMOV(x2n, x1n, e2) # If e2, x = x1, else x = x2 + let y = Fp.cmov(y2, y1, e2) // 24. y = CMOV(y2, y1, e2) # If e2, y = y1, else y = y2 + let e3 = Fp.isOdd(y) // 25. e3 = sgn0(y) == 1 # Fix sign of y + y = Fp.cmov(y, Fp.neg(y), e2 !== e3) // 26. y = CMOV(y, -y, e2 XOR e3) + return { xn, xd, yn: y, yd: Fp.ONE } // 27. return (xn, xd, y, 1) +} + +function map_to_curve_elligator2_edwards448(u: bigint) { + let { xn, xd, yn, yd } = map_to_curve_elligator2_curve448(u) // 1. (xn, xd, yn, yd) = map_to_curve_elligator2_curve448(u) + let xn2 = Fp.sqr(xn) // 2. xn2 = xn^2 + let xd2 = Fp.sqr(xd) // 3. xd2 = xd^2 + let xd4 = Fp.sqr(xd2) // 4. xd4 = xd2^2 + let yn2 = Fp.sqr(yn) // 5. yn2 = yn^2 + let yd2 = Fp.sqr(yd) // 6. yd2 = yd^2 + let xEn = Fp.sub(xn2, xd2) // 7. xEn = xn2 - xd2 + let tv2 = Fp.sub(xEn, xd2) // 8. tv2 = xEn - xd2 + xEn = Fp.mul(xEn, xd2) // 9. xEn = xEn * xd2 + xEn = Fp.mul(xEn, yd) // 10. xEn = xEn * yd + xEn = Fp.mul(xEn, yn) // 11. xEn = xEn * yn + xEn = Fp.mul(xEn, _4n) // 12. xEn = xEn * 4 + tv2 = Fp.mul(tv2, xn2) // 13. tv2 = tv2 * xn2 + tv2 = Fp.mul(tv2, yd2) // 14. tv2 = tv2 * yd2 + let tv3 = Fp.mul(yn2, _4n) // 15. tv3 = 4 * yn2 + let tv1 = Fp.add(tv3, yd2) // 16. tv1 = tv3 + yd2 + tv1 = Fp.mul(tv1, xd4) // 17. 
tv1 = tv1 * xd4 + let xEd = Fp.add(tv1, tv2) // 18. xEd = tv1 + tv2 + tv2 = Fp.mul(tv2, xn) // 19. tv2 = tv2 * xn + let tv4 = Fp.mul(xn, xd4) // 20. tv4 = xn * xd4 + let yEn = Fp.sub(tv3, yd2) // 21. yEn = tv3 - yd2 + yEn = Fp.mul(yEn, tv4) // 22. yEn = yEn * tv4 + yEn = Fp.sub(yEn, tv2) // 23. yEn = yEn - tv2 + tv1 = Fp.add(xn2, xd2) // 24. tv1 = xn2 + xd2 + tv1 = Fp.mul(tv1, xd2) // 25. tv1 = tv1 * xd2 + tv1 = Fp.mul(tv1, xd) // 26. tv1 = tv1 * xd + tv1 = Fp.mul(tv1, yn2) // 27. tv1 = tv1 * yn2 + tv1 = Fp.mul(tv1, BigInt(-2)) // 28. tv1 = -2 * tv1 + let yEd = Fp.add(tv2, tv1) // 29. yEd = tv2 + tv1 + tv4 = Fp.mul(tv4, yd2) // 30. tv4 = tv4 * yd2 + yEd = Fp.add(yEd, tv4) // 31. yEd = yEd + tv4 + tv1 = Fp.mul(xEd, yEd) // 32. tv1 = xEd * yEd + let e = Fp.eql(tv1, Fp.ZERO) // 33. e = tv1 == 0 + xEn = Fp.cmov(xEn, Fp.ZERO, e) // 34. xEn = CMOV(xEn, 0, e) + xEd = Fp.cmov(xEd, Fp.ONE, e) // 35. xEd = CMOV(xEd, 1, e) + yEn = Fp.cmov(yEn, Fp.ONE, e) // 36. yEn = CMOV(yEn, 1, e) + yEd = Fp.cmov(yEd, Fp.ONE, e) // 37. yEd = CMOV(yEd, 1, e) + + const inv = Fp.invertBatch([xEd, yEd]) // batch division + return { x: Fp.mul(xEn, inv[0]), y: Fp.mul(yEn, inv[1]) } // 38. return (xEn, xEd, yEn, yEd) +} + +export const ed448_hasher: Hasher = /* @__PURE__ */ (() => + createHasher( + ed448.ExtendedPoint, + (scalars: bigint[]) => map_to_curve_elligator2_edwards448(scalars[0]), + { + DST: 'edwards448_XOF:SHAKE256_ELL2_RO_', + encodeDST: 'edwards448_XOF:SHAKE256_ELL2_NU_', + p: Fp.ORDER, + m: 1, + k: 224, + expand: 'xof', + hash: shake256, + }, + ))() +/** + * @deprecated Use `ed448_hasher` + */ +export const hashToCurve: HTFMethod = /* @__PURE__ */ (() => ed448_hasher.hashToCurve)() +/** + * @deprecated Use `ed448_hasher` + */ +export const encodeToCurve: HTFMethod = /* @__PURE__ */ (() => ed448_hasher.encodeToCurve)() + +function adecafp(other: unknown) { + if (!(other instanceof DcfPoint)) throw new Error('DecafPoint expected') +} + +// 1-d +const ONE_MINUS_D = BigInt('39082') +// 1-2d +const ONE_MINUS_TWO_D = BigInt('78163') +// √(-d) +const SQRT_MINUS_D = BigInt( + '98944233647732219769177004876929019128417576295529901074099889598043702116001257856802131563896515373927712232092845883226922417596214', +) +// 1 / √(-d) +const INVSQRT_MINUS_D = BigInt( + '315019913931389607337177038330951043522456072897266928557328499619017160722351061360252776265186336876723201881398623946864393857820716', +) +// Calculates 1/√(number) +const invertSqrt = (number: bigint) => uvRatio(_1n, number) + +const MAX_448B = BigInt( + '0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff', +) +const bytes448ToNumberLE = (bytes: Uint8Array) => + ed448.CURVE.Fp.create(bytesToNumberLE(bytes) & MAX_448B) + +type ExtendedPoint = ExtPointType + +// Computes Elligator map for Decaf +// https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-ristretto255-decaf448-07#name-element-derivation-2 +function calcElligatorDecafMap(r0: bigint): ExtendedPoint { + const { d } = ed448.CURVE + const P = ed448.CURVE.Fp.ORDER + const mod = ed448.CURVE.Fp.create + + const r = mod(-(r0 * r0)) // 1 + const u0 = mod(d * (r - _1n)) // 2 + const u1 = mod((u0 + _1n) * (u0 - r)) // 3 + + const { isValid: was_square, value: v } = uvRatio(ONE_MINUS_TWO_D, mod((r + _1n) * u1)) // 4 + + let v_prime = v // 5 + if (!was_square) v_prime = mod(r0 * v) + + let sgn = _1n // 6 + if (!was_square) sgn = mod(-_1n) + + const s = mod(v_prime * (r + _1n)) // 7 + let s_abs = s + if (isNegativeLE(s, P)) s_abs = mod(-s) + + 
const s2 = s * s + const W0 = mod(s_abs * _2n) // 8 + const W1 = mod(s2 + _1n) // 9 + const W2 = mod(s2 - _1n) // 10 + const W3 = mod(v_prime * s * (r - _1n) * ONE_MINUS_TWO_D + sgn) // 11 + return new ed448.ExtendedPoint(mod(W0 * W3), mod(W2 * W1), mod(W1 * W3), mod(W0 * W2)) +} + +/** + * Each ed448/ExtendedPoint has 4 different equivalent points. This can be + * a source of bugs for protocols like ring signatures. Decaf was created to solve this. + * Decaf point operates in X:Y:Z:T extended coordinates like ExtendedPoint, + * but it should work in its own namespace: do not combine those two. + * https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-ristretto255-decaf448 + */ +class DcfPoint implements Group { + static BASE: DcfPoint + static ZERO: DcfPoint + private readonly ep: ExtendedPoint + // Private property to discourage combining ExtendedPoint + DecafPoint + // Always use Decaf encoding/decoding instead. + constructor(ep: ExtendedPoint) { + this.ep = ep + } + + static fromAffine(ap: AffinePoint): DcfPoint { + return new DcfPoint(ed448.ExtendedPoint.fromAffine(ap)) + } + + /** + * Takes uniform output of 112-byte hash function like shake256 and converts it to `DecafPoint`. + * The hash-to-group operation applies Elligator twice and adds the results. + * **Note:** this is one-way map, there is no conversion from point to hash. + * https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-ristretto255-decaf448-07#name-element-derivation-2 + * @param hex 112-byte output of a hash function + */ + static hashToCurve(hex: Hex): DcfPoint { + hex = ensureBytes('decafHash', hex, 112) + const r1 = bytes448ToNumberLE(hex.slice(0, 56)) + const R1 = calcElligatorDecafMap(r1) + const r2 = bytes448ToNumberLE(hex.slice(56, 112)) + const R2 = calcElligatorDecafMap(r2) + return new DcfPoint(R1.add(R2)) + } + + /** + * Converts decaf-encoded string to decaf point. + * https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-ristretto255-decaf448-07#name-decode-2 + * @param hex Decaf-encoded 56 bytes. Not every 56-byte string is valid decaf encoding + */ + static fromHex(hex: Hex): DcfPoint { + hex = ensureBytes('decafHex', hex, 56) + const { d } = ed448.CURVE + const P = ed448.CURVE.Fp.ORDER + const mod = ed448.CURVE.Fp.create + const emsg = 'DecafPoint.fromHex: the hex is not valid encoding of DecafPoint' + const s = bytes448ToNumberLE(hex) + + // 1. Check that s_bytes is the canonical encoding of a field element, or else abort. + // 2. Check that s is non-negative, or else abort + if (!equalBytes(numberToBytesLE(s, 56), hex) || isNegativeLE(s, P)) throw new Error(emsg) + + const s2 = mod(s * s) // 1 + const u1 = mod(_1n + s2) // 2 + const u1sq = mod(u1 * u1) + const u2 = mod(u1sq - _4n * d * s2) // 3 + + const { isValid, value: invsqrt } = invertSqrt(mod(u2 * u1sq)) // 4 + + let u3 = mod((s + s) * invsqrt * u1 * SQRT_MINUS_D) // 5 + if (isNegativeLE(u3, P)) u3 = mod(-u3) + + const x = mod(u3 * invsqrt * u2 * INVSQRT_MINUS_D) // 6 + const y = mod((_1n - s2) * invsqrt * u1) // 7 + const t = mod(x * y) // 8 + + if (!isValid) throw new Error(emsg) + return new DcfPoint(new ed448.ExtendedPoint(x, y, _1n, t)) + } + + static msm(points: DcfPoint[], scalars: bigint[]): DcfPoint { + const Fn = Field(ed448.CURVE.n, ed448.CURVE.nBitLength) + return pippenger(DcfPoint, Fn, points, scalars) + } + + /** + * Encodes decaf point to Uint8Array. 
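+ * Output is the canonical 56-byte little-endian encoding of the field element s.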
+ * https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-ristretto255-decaf448-07#name-encode-2 + */ + toRawBytes(): Uint8Array { + let { ex: x, ey: _y, ez: z, et: t } = this.ep + const P = ed448.CURVE.Fp.ORDER + const mod = ed448.CURVE.Fp.create + + const u1 = mod(mod(x + t) * mod(x - t)) // 1 + const x2 = mod(x * x) + const { value: invsqrt } = invertSqrt(mod(u1 * ONE_MINUS_D * x2)) // 2 + + let ratio = mod(invsqrt * u1 * SQRT_MINUS_D) // 3 + if (isNegativeLE(ratio, P)) ratio = mod(-ratio) + + const u2 = mod(INVSQRT_MINUS_D * ratio * z - t) // 4 + + let s = mod(ONE_MINUS_D * invsqrt * x * u2) // 5 + if (isNegativeLE(s, P)) s = mod(-s) + + return numberToBytesLE(s, 56) + } + + toHex(): string { + return bytesToHex(this.toRawBytes()) + } + + toString(): string { + return this.toHex() + } + + // Compare one point to another. + // https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-ristretto255-decaf448-07#name-equals-2 + equals(other: DcfPoint): boolean { + adecafp(other) + const { ex: X1, ey: Y1 } = this.ep + const { ex: X2, ey: Y2 } = other.ep + const mod = ed448.CURVE.Fp.create + // (x1 * y2 == y1 * x2) + return mod(X1 * Y2) === mod(Y1 * X2) + } + + add(other: DcfPoint): DcfPoint { + adecafp(other) + return new DcfPoint(this.ep.add(other.ep)) + } + + subtract(other: DcfPoint): DcfPoint { + adecafp(other) + return new DcfPoint(this.ep.subtract(other.ep)) + } + + multiply(scalar: bigint): DcfPoint { + return new DcfPoint(this.ep.multiply(scalar)) + } + + multiplyUnsafe(scalar: bigint): DcfPoint { + return new DcfPoint(this.ep.multiplyUnsafe(scalar)) + } + + double(): DcfPoint { + return new DcfPoint(this.ep.double()) + } + + negate(): DcfPoint { + return new DcfPoint(this.ep.negate()) + } +} + +export const DecafPoint: typeof DcfPoint = /* @__PURE__ */ (() => { + // decaf448 base point is ed448 base x 2 + // https://github.com/dalek-cryptography/curve25519-dalek/blob/59837c6ecff02b77b9d5ff84dbc239d0cf33ef90/vendor/ristretto.sage#L699 + if (!DcfPoint.BASE) DcfPoint.BASE = new DcfPoint(ed448.ExtendedPoint.BASE).multiply(_2n) + if (!DcfPoint.ZERO) DcfPoint.ZERO = new DcfPoint(ed448.ExtendedPoint.ZERO) + return DcfPoint +})() + +// Hashing to decaf448. https://www.rfc-editor.org/rfc/rfc9380#appendix-C +export const hashToDecaf448 = (msg: Uint8Array, options: htfBasicOpts): DcfPoint => { + const d = options.DST + const DST = typeof d === 'string' ? utf8ToBytes(d) : d + const uniform_bytes = expand_message_xof(msg, DST, 112, 224, shake256) + const P = DcfPoint.hashToCurve(uniform_bytes) + return P +} +export const hash_to_decaf448: typeof hashToDecaf448 = hashToDecaf448 // legacy diff --git a/packages/evm/src/precompiles/bls12_381/nbl/src/index.ts b/packages/evm/src/precompiles/bls12_381/nbl/src/index.ts new file mode 100644 index 00000000000..8b4ac3bb10f --- /dev/null +++ b/packages/evm/src/precompiles/bls12_381/nbl/src/index.ts @@ -0,0 +1,17 @@ +/** + * Audited & minimal JS implementation of elliptic curve cryptography. 
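+ * The root module throws on import; use the per-curve submodules shown in the example below.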
+ * @module + * @example +```js +import { secp256k1, schnorr } from '@noble/curves/secp256k1'; +import { ed25519, ed25519ph, ed25519ctx, x25519, RistrettoPoint } from '@noble/curves/ed25519'; +import { ed448, ed448ph, ed448ctx, x448 } from '@noble/curves/ed448'; +import { p256 } from '@noble/curves/p256'; +import { p384 } from '@noble/curves/p384'; +import { p521 } from '@noble/curves/p521'; +import { bls12_381 } from '@noble/curves/bls12-381'; +import { bn254 } from '@noble/curves/bn254'; +import { bytesToHex, hexToBytes, concatBytes, utf8ToBytes } from '@noble/curves/abstract/utils'; +``` + */ +throw new Error('root module cannot be imported: import submodules instead. Check out README') diff --git a/packages/evm/src/precompiles/bls12_381/nbl/src/jubjub.ts b/packages/evm/src/precompiles/bls12_381/nbl/src/jubjub.ts new file mode 100644 index 00000000000..7c2c07f240c --- /dev/null +++ b/packages/evm/src/precompiles/bls12_381/nbl/src/jubjub.ts @@ -0,0 +1,5 @@ +export { + jubjub_findGroupHash as findGroupHash, + jubjub_groupHash as groupHash, + jubjub, +} from './misc.ts' diff --git a/packages/evm/src/precompiles/bls12_381/nbl/src/misc.ts b/packages/evm/src/precompiles/bls12_381/nbl/src/misc.ts new file mode 100644 index 00000000000..6109ab69b02 --- /dev/null +++ b/packages/evm/src/precompiles/bls12_381/nbl/src/misc.ts @@ -0,0 +1,117 @@ +/** + * Miscellaneous, rarely used curves. + * jubjub, babyjubjub, pallas, vesta. + * @module + */ +/*! noble-curves - MIT License (c) 2022 Paul Miller (paulmillr.com) */ +import { blake256 } from '@noble/hashes/blake1' +import { blake2s } from '@noble/hashes/blake2s' +import { sha256, sha512 } from '@noble/hashes/sha2' +import { concatBytes, randomBytes, utf8ToBytes } from '@noble/hashes/utils' +import { getHash } from './_shortw_utils.ts' +import { type CurveFn, type ExtPointType, twistedEdwards } from './abstract/edwards.ts' +import { Field, mod } from './abstract/modular.ts' +import { type CurveFn as WCurveFn, weierstrass } from './abstract/weierstrass.ts' + +// Jubjub curves have 𝔽p over scalar fields of other curves. They are friendly to ZK proofs. +// jubjub Fp = bls n. babyjubjub Fp = bn254 n. +// verify manually, check bls12-381.ts and bn254.ts. +// https://neuromancer.sk/std/other/JubJub + +const bls12_381_Fr = Field( + BigInt('0x73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000001'), +) +const bn254_Fr = Field( + BigInt('21888242871839275222246405745257275088548364400416034343698204186575808495617'), +) + +/** Curve over scalar field of bls12-381. jubjub Fp = bls n */ +export const jubjub: CurveFn = /* @__PURE__ */ twistedEdwards({ + a: BigInt('0x73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000000'), + d: BigInt('0x2a9318e74bfa2b48f5fd9207e6bd7fd4292d7f6d37579d2601065fd6d6343eb1'), + Fp: bls12_381_Fr, + n: BigInt('0xe7db4ea6533afa906673b0101343b00a6682093ccc81082d0970e5ed6f72cb7'), + h: BigInt(8), + Gx: BigInt('0x11dafe5d23e1218086a365b99fbf3d3be72f6afd7d1f72623e6b071492d1122b'), + Gy: BigInt('0x1d523cf1ddab1a1793132e78c866c0c33e26ba5cc220fed7cc3f870e59d292aa'), + hash: sha512, + randomBytes, +} as const) + +/** Curve over scalar field of bn254. 
babyjubjub Fp = bn254 n */ +export const babyjubjub: CurveFn = /* @__PURE__ */ twistedEdwards({ + a: BigInt(168700), + d: BigInt(168696), + Fp: bn254_Fr, + n: BigInt('21888242871839275222246405745257275088614511777268538073601725287587578984328'), + h: BigInt(8), + Gx: BigInt('995203441582195749578291179787384436505546430278305826713579947235728471134'), + Gy: BigInt('5472060717959818805561601436314318772137091100104008585924551046643952123905'), + hash: blake256, + randomBytes, +} as const) + +const jubjub_gh_first_block = utf8ToBytes( + '096b36a5804bfacef1691e173c366a47ff5ba84a44f26ddd7e8d9f79d5b42df0', +) + +// Returns point at JubJub curve which is prime order and not zero +export function jubjub_groupHash(tag: Uint8Array, personalization: Uint8Array): ExtPointType { + const h = blake2s.create({ personalization, dkLen: 32 }) + h.update(jubjub_gh_first_block) + h.update(tag) + // NOTE: returns ExtendedPoint, in case it will be multiplied later + let p = jubjub.ExtendedPoint.fromHex(h.digest()) + // NOTE: cannot replace with isSmallOrder, returns Point*8 + p = p.multiply(jubjub.CURVE.h) + if (p.equals(jubjub.ExtendedPoint.ZERO)) throw new Error('Point has small order') + return p +} + +// No secret data is leaked here at all. +// It operates over public data: +// const G_SPEND = jubjub.findGroupHash(new Uint8Array(), utf8ToBytes('Item_G_')); +export function jubjub_findGroupHash(m: Uint8Array, personalization: Uint8Array): ExtPointType { + const tag = concatBytes(m, new Uint8Array([0])) + const hashes = [] + for (let i = 0; i < 256; i++) { + tag[tag.length - 1] = i + try { + hashes.push(jubjub_groupHash(tag, personalization)) + } catch (e) {} + } + if (!hashes.length) throw new Error('findGroupHash tag overflow') + return hashes[0] +} + +// Pasta curves. See [Spec](https://o1-labs.github.io/proof-systems/specs/pasta.html). + +export const pasta_p: bigint = BigInt( + '0x40000000000000000000000000000000224698fc094cf91b992d30ed00000001', +) +export const pasta_q: bigint = BigInt( + '0x40000000000000000000000000000000224698fc0994a8dd8c46eb2100000001', +) + +/** https://neuromancer.sk/std/other/Pallas */ +export const pallas: WCurveFn = weierstrass({ + a: BigInt(0), + b: BigInt(5), + Fp: Field(pasta_p), + n: pasta_q, + Gx: mod(BigInt(-1), pasta_p), + Gy: BigInt(2), + h: BigInt(1), + ...getHash(sha256), +}) +/** https://neuromancer.sk/std/other/Vesta */ +export const vesta: WCurveFn = weierstrass({ + a: BigInt(0), + b: BigInt(5), + Fp: Field(pasta_q), + n: pasta_p, + Gx: mod(BigInt(-1), pasta_q), + Gy: BigInt(2), + h: BigInt(1), + ...getHash(sha256), +}) diff --git a/packages/evm/src/precompiles/bls12_381/nbl/src/nist.ts b/packages/evm/src/precompiles/bls12_381/nbl/src/nist.ts new file mode 100644 index 00000000000..44c3eb545df --- /dev/null +++ b/packages/evm/src/precompiles/bls12_381/nbl/src/nist.ts @@ -0,0 +1,171 @@ +/** + * NIST secp256r1 aka p256. + * https://www.secg.org/sec2-v2.pdf, https://neuromancer.sk/std/nist/P-256 + * @module + */ +/*! 
noble-curves - MIT License (c) 2022 Paul Miller (paulmillr.com) */ +import { sha256, sha384, sha512 } from '@noble/hashes/sha2' +import { type CurveFnWithCreate, createCurve } from './_shortw_utils.ts' +import { type Hasher, createHasher } from './abstract/hash-to-curve.ts' +import { Field } from './abstract/modular.ts' +import { mapToCurveSimpleSWU } from './abstract/weierstrass.ts' + +const Fp256 = Field(BigInt('0xffffffff00000001000000000000000000000000ffffffffffffffffffffffff')) +const p256_a = Fp256.create(BigInt('-3')) +const p256_b = BigInt('0x5ac635d8aa3a93e7b3ebbd55769886bc651d06b0cc53b0f63bce3c3e27d2604b') + +/** + * secp256r1 curve, ECDSA and ECDH methods. + * Field: `2n**224n * (2n**32n-1n) + 2n**192n + 2n**96n-1n` + */ +// prettier-ignore +export const p256: CurveFnWithCreate = createCurve( + { + a: p256_a, + b: p256_b, + Fp: Fp256, + n: BigInt('0xffffffff00000000ffffffffffffffffbce6faada7179e84f3b9cac2fc632551'), + Gx: BigInt('0x6b17d1f2e12c4247f8bce6e563a440f277037d812deb33a0f4a13945d898c296'), + Gy: BigInt('0x4fe342e2fe1a7f9b8ee7eb4a7c0f9e162bce33576b315ececbb6406837bf51f5'), + h: BigInt(1), + lowS: false, + } as const, + sha256, +) +/** Alias to p256. */ +export const secp256r1: CurveFnWithCreate = p256 + +const p256_mapSWU = /* @__PURE__ */ (() => + mapToCurveSimpleSWU(Fp256, { + A: p256_a, + B: p256_b, + Z: Fp256.create(BigInt('-10')), + }))() + +/** Hashing / encoding to p256 points / field. RFC 9380 methods. */ +export const p256_hasher: Hasher = /* @__PURE__ */ (() => + createHasher(secp256r1.ProjectivePoint, (scalars: bigint[]) => p256_mapSWU(scalars[0]), { + DST: 'P256_XMD:SHA-256_SSWU_RO_', + encodeDST: 'P256_XMD:SHA-256_SSWU_NU_', + p: Fp256.ORDER, + m: 1, + k: 128, + expand: 'xmd', + hash: sha256, + }))() + +// Field over which we'll do calculations. +const Fp384 = Field( + BigInt( + '0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffeffffffff0000000000000000ffffffff', + ), +) +const p384_a = Fp384.create(BigInt('-3')) +// prettier-ignore +const p384_b = BigInt( + '0xb3312fa7e23ee7e4988e056be3f82d19181d9c6efe8141120314088f5013875ac656398d8a2ed19d2a85c8edd3ec2aef', +) + +/** + * secp384r1 curve, ECDSA and ECDH methods. + * Field: `2n**384n - 2n**128n - 2n**96n + 2n**32n - 1n`. + * */ +// prettier-ignore +export const p384: CurveFnWithCreate = createCurve( + { + a: p384_a, + b: p384_b, + Fp: Fp384, + n: BigInt( + '0xffffffffffffffffffffffffffffffffffffffffffffffffc7634d81f4372ddf581a0db248b0a77aecec196accc52973', + ), + Gx: BigInt( + '0xaa87ca22be8b05378eb1c71ef320ad746e1d3b628ba79b9859f741e082542a385502f25dbf55296c3a545e3872760ab7', + ), + Gy: BigInt( + '0x3617de4a96262c6f5d9e98bf9292dc29f8f41dbd289a147ce9da3113b5f0b8c00a60b1ce1d7e819d7a431d7c90ea0e5f', + ), + h: BigInt(1), + lowS: false, + } as const, + sha384, +) +/** Alias to p384. */ +export const secp384r1: CurveFnWithCreate = p384 + +const p384_mapSWU = /* @__PURE__ */ (() => + mapToCurveSimpleSWU(Fp384, { + A: p384_a, + B: p384_b, + Z: Fp384.create(BigInt('-12')), + }))() + +/** Hashing / encoding to p384 points / field. RFC 9380 methods. */ +export const p384_hasher: Hasher = /* @__PURE__ */ (() => + createHasher(secp384r1.ProjectivePoint, (scalars: bigint[]) => p384_mapSWU(scalars[0]), { + DST: 'P384_XMD:SHA-384_SSWU_RO_', + encodeDST: 'P384_XMD:SHA-384_SSWU_NU_', + p: Fp384.ORDER, + m: 1, + k: 192, + expand: 'xmd', + hash: sha384, + }))() + +// Field over which we'll do calculations. 
+const Fp521 = Field( + BigInt( + '0x1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff', + ), +) + +const p521_a = Fp521.create(BigInt('-3')) +const p521_b = BigInt( + '0x0051953eb9618e1c9a1f929a21a0b68540eea2da725b99b315f3b8b489918ef109e156193951ec7e937b1652c0bd3bb1bf073573df883d2c34f1ef451fd46b503f00', +) + +/** + * NIST secp521r1 aka p521 curve, ECDSA and ECDH methods. + * Field: `2n**521n - 1n`. + */ +// prettier-ignore +export const p521: CurveFnWithCreate = createCurve( + { + a: p521_a, + b: p521_b, + Fp: Fp521, + n: BigInt( + '0x01fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffa51868783bf2f966b7fcc0148f709a5d03bb5c9b8899c47aebb6fb71e91386409', + ), + Gx: BigInt( + '0x00c6858e06b70404e9cd9e3ecb662395b4429c648139053fb521f828af606b4d3dbaa14b5e77efe75928fe1dc127a2ffa8de3348b3c1856a429bf97e7e31c2e5bd66', + ), + Gy: BigInt( + '0x011839296a789a3bc0045c8a5fb42c7d1bd998f54449579b446817afbd17273e662c97ee72995ef42640c550b9013fad0761353c7086a272c24088be94769fd16650', + ), + h: BigInt(1), + lowS: false, + allowedPrivateKeyLengths: [130, 131, 132], // P521 keys are variable-length. Normalize to 132b + } as const, + sha512, +) +export const secp521r1: CurveFnWithCreate = p521 + +const p521_mapSWU = /* @__PURE__ */ (() => + mapToCurveSimpleSWU(Fp521, { + A: p521_a, + B: p521_b, + Z: Fp521.create(BigInt('-4')), + }))() + +/** Hashing / encoding to p521 points / field. RFC 9380 methods. */ +export const p521_hasher: Hasher = /* @__PURE__ */ (() => + createHasher(secp521r1.ProjectivePoint, (scalars: bigint[]) => p521_mapSWU(scalars[0]), { + DST: 'P521_XMD:SHA-512_SSWU_RO_', + encodeDST: 'P521_XMD:SHA-512_SSWU_NU_', + p: Fp521.ORDER, + m: 1, + k: 256, + expand: 'xmd', + hash: sha512, + }))() diff --git a/packages/evm/src/precompiles/bls12_381/nbl/src/p256.ts b/packages/evm/src/precompiles/bls12_381/nbl/src/p256.ts new file mode 100644 index 00000000000..f1bf69ad74e --- /dev/null +++ b/packages/evm/src/precompiles/bls12_381/nbl/src/p256.ts @@ -0,0 +1,23 @@ +/** + * NIST secp256r1 aka p256. + * @module + */ +/*! noble-curves - MIT License (c) 2022 Paul Miller (paulmillr.com) */ +import { type HTFMethod } from './abstract/hash-to-curve.ts' +import { p256_hasher, p256 as p256n } from './nist.ts' +/** + * @deprecated Use `@noble/curves/nist` module directly. + */ +export const p256: typeof p256n = p256n +/** + * @deprecated Use `@noble/curves/nist` module directly. + */ +export const secp256r1: typeof p256n = p256n +/** + * @deprecated Use `p256_hasher` from `@noble/curves/nist` module directly. + */ +export const hashToCurve: HTFMethod = /* @__PURE__ */ (() => p256_hasher.hashToCurve)() +/** + * @deprecated Use `p256_hasher` from `@noble/curves/nist` module directly. + */ +export const encodeToCurve: HTFMethod = /* @__PURE__ */ (() => p256_hasher.encodeToCurve)() diff --git a/packages/evm/src/precompiles/bls12_381/nbl/src/p384.ts b/packages/evm/src/precompiles/bls12_381/nbl/src/p384.ts new file mode 100644 index 00000000000..f08cc70b11a --- /dev/null +++ b/packages/evm/src/precompiles/bls12_381/nbl/src/p384.ts @@ -0,0 +1,23 @@ +/** + * NIST secp384r1 aka p384. + * @module + */ +/*! noble-curves - MIT License (c) 2022 Paul Miller (paulmillr.com) */ +import { type HTFMethod } from './abstract/hash-to-curve.ts' +import { p384_hasher, p384 as p384n } from './nist.ts' +/** + * @deprecated Use `@noble/curves/nist` module directly. 
+ */ +export const p384: typeof p384n = p384n +/** + * @deprecated Use `@noble/curves/nist` module directly. + */ +export const secp384r1: typeof p384n = p384n +/** + * @deprecated Use `p384_hasher` from `@noble/curves/nist` module directly. + */ +export const hashToCurve: HTFMethod = /* @__PURE__ */ (() => p384_hasher.hashToCurve)() +/** + * @deprecated Use `p384_hasher` from `@noble/curves/nist` module directly. + */ +export const encodeToCurve: HTFMethod = /* @__PURE__ */ (() => p384_hasher.encodeToCurve)() diff --git a/packages/evm/src/precompiles/bls12_381/nbl/src/p521.ts b/packages/evm/src/precompiles/bls12_381/nbl/src/p521.ts new file mode 100644 index 00000000000..feedf852f72 --- /dev/null +++ b/packages/evm/src/precompiles/bls12_381/nbl/src/p521.ts @@ -0,0 +1,23 @@ +/** + * NIST secp521r1 aka p521. + * @module + */ +/*! noble-curves - MIT License (c) 2022 Paul Miller (paulmillr.com) */ +import { type HTFMethod } from './abstract/hash-to-curve.ts' +import { p521_hasher, p521 as p521n } from './nist.ts' +/** + * @deprecated Use `@noble/curves/nist` module directly. + */ +export const p521: typeof p521n = p521n +/** + * @deprecated Use `@noble/curves/nist` module directly. + */ +export const secp521r1: typeof p521n = p521n +/** + * @deprecated Use `p521_hasher` from `@noble/curves/nist` module directly. + */ +export const hashToCurve: HTFMethod = /* @__PURE__ */ (() => p521_hasher.hashToCurve)() +/** + * @deprecated Use `p521_hasher` from `@noble/curves/nist` module directly. + */ +export const encodeToCurve: HTFMethod = /* @__PURE__ */ (() => p521_hasher.encodeToCurve)() diff --git a/packages/evm/src/precompiles/bls12_381/nbl/src/package.json b/packages/evm/src/precompiles/bls12_381/nbl/src/package.json new file mode 100644 index 00000000000..3dbc1ca591c --- /dev/null +++ b/packages/evm/src/precompiles/bls12_381/nbl/src/package.json @@ -0,0 +1,3 @@ +{ + "type": "module" +} diff --git a/packages/evm/src/precompiles/bls12_381/nbl/src/pasta.ts b/packages/evm/src/precompiles/bls12_381/nbl/src/pasta.ts new file mode 100644 index 00000000000..18aa8b858c7 --- /dev/null +++ b/packages/evm/src/precompiles/bls12_381/nbl/src/pasta.ts @@ -0,0 +1 @@ +export { pallas, vesta } from './misc.ts' diff --git a/packages/evm/src/precompiles/bls12_381/nbl/src/secp256k1.ts b/packages/evm/src/precompiles/bls12_381/nbl/src/secp256k1.ts new file mode 100644 index 00000000000..810c5698ac6 --- /dev/null +++ b/packages/evm/src/precompiles/bls12_381/nbl/src/secp256k1.ts @@ -0,0 +1,343 @@ +/** + * SECG secp256k1. See [pdf](https://www.secg.org/sec2-v2.pdf). + * + * Seems to be rigid (not backdoored) + * [as per discussion](https://bitcointalk.org/index.php?topic=289795.msg3183975#msg3183975). + * + * secp256k1 belongs to Koblitz curves: it has efficiently computable endomorphism. + * Endomorphism uses 2x less RAM, speeds up precomputation by 2x and ECDH / key recovery by 20%. + * For precomputed wNAF it trades off 1/2 init time & 1/3 ram for 20% perf hit. + * [See explanation](https://gist.github.com/paulmillr/eb670806793e84df628a7c434a873066). + * @module + */ +/*!
noble-curves - MIT License (c) 2022 Paul Miller (paulmillr.com) */ +import { sha256 } from '@noble/hashes/sha2' +import { randomBytes } from '@noble/hashes/utils' +import { type CurveFnWithCreate, createCurve } from './_shortw_utils.ts' +import { type HTFMethod, type Hasher, createHasher, isogenyMap } from './abstract/hash-to-curve.ts' +import { Field, mod, pow2 } from './abstract/modular.ts' +import type { Hex, PrivKey } from './abstract/utils.ts' +import { + aInRange, + bytesToNumberBE, + concatBytes, + ensureBytes, + inRange, + numberToBytesBE, +} from './abstract/utils.ts' +import { type ProjPointType as PointType, mapToCurveSimpleSWU } from './abstract/weierstrass.ts' + +const secp256k1P = BigInt('0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f') +const secp256k1N = BigInt('0xfffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141') +const _1n = BigInt(1) +const _2n = BigInt(2) +const divNearest = (a: bigint, b: bigint) => (a + b / _2n) / b + +/** + * √n = n^((p+1)/4) for fields p = 3 mod 4. We unwrap the loop and multiply bit-by-bit. + * (P+1n/4n).toString(2) would produce bits [223x 1, 0, 22x 1, 4x 0, 11, 00] + */ +function sqrtMod(y: bigint): bigint { + const P = secp256k1P + // prettier-ignore + const _3n = BigInt(3), + _6n = BigInt(6), + _11n = BigInt(11), + _22n = BigInt(22) + // prettier-ignore + const _23n = BigInt(23), + _44n = BigInt(44), + _88n = BigInt(88) + const b2 = (y * y * y) % P // x^3, 11 + const b3 = (b2 * b2 * y) % P // x^7 + const b6 = (pow2(b3, _3n, P) * b3) % P + const b9 = (pow2(b6, _3n, P) * b3) % P + const b11 = (pow2(b9, _2n, P) * b2) % P + const b22 = (pow2(b11, _11n, P) * b11) % P + const b44 = (pow2(b22, _22n, P) * b22) % P + const b88 = (pow2(b44, _44n, P) * b44) % P + const b176 = (pow2(b88, _88n, P) * b88) % P + const b220 = (pow2(b176, _44n, P) * b44) % P + const b223 = (pow2(b220, _3n, P) * b3) % P + const t1 = (pow2(b223, _23n, P) * b22) % P + const t2 = (pow2(t1, _6n, P) * b2) % P + const root = pow2(t2, _2n, P) + if (!Fpk1.eql(Fpk1.sqr(root), y)) throw new Error('Cannot find square root') + return root +} + +const Fpk1 = Field(secp256k1P, undefined, undefined, { sqrt: sqrtMod }) + +/** + * secp256k1 curve, ECDSA and ECDH methods. 
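+ * GLV endomorphism and low-S signatures are enabled by default (see `endo` and `lowS` in the config below).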
+ * + * Field: `2n**256n - 2n**32n - 2n**9n - 2n**8n - 2n**7n - 2n**6n - 2n**4n - 1n` + * + * @example + * ```js + * import { secp256k1 } from '@noble/curves/secp256k1'; + * const priv = secp256k1.utils.randomPrivateKey(); + * const pub = secp256k1.getPublicKey(priv); + * const msg = new Uint8Array(32).fill(1); // message hash (not message) in ecdsa + * const sig = secp256k1.sign(msg, priv); // `{prehash: true}` option is available + * const isValid = secp256k1.verify(sig, msg, pub) === true; + * ``` + */ +export const secp256k1: CurveFnWithCreate = createCurve( + { + a: BigInt(0), + b: BigInt(7), + Fp: Fpk1, + n: secp256k1N, + Gx: BigInt('55066263022277343669578718895168534326250603453777594175500187360389116729240'), + Gy: BigInt('32670510020758816978083085130507043184471273380659243275938904335757337482424'), + h: BigInt(1), // Cofactor + lowS: true, // Allow only low-S signatures by default in sign() and verify() + endo: { + // Endomorphism, see above + beta: BigInt('0x7ae96a2b657c07106e64479eac3434e99cf0497512f58995c1396c28719501ee'), + splitScalar: (k: bigint) => { + const n = secp256k1N + const a1 = BigInt('0x3086d221a7d46bcde86c90e49284eb15') + const b1 = -_1n * BigInt('0xe4437ed6010e88286f547fa90abfe4c3') + const a2 = BigInt('0x114ca50f7a8e2f3f657c1108d9d44cfd8') + const b2 = a1 + const POW_2_128 = BigInt('0x100000000000000000000000000000000') // (2n**128n).toString(16) + + const c1 = divNearest(b2 * k, n) + const c2 = divNearest(-b1 * k, n) + let k1 = mod(k - c1 * a1 - c2 * a2, n) + let k2 = mod(-c1 * b1 - c2 * b2, n) + const k1neg = k1 > POW_2_128 + const k2neg = k2 > POW_2_128 + if (k1neg) k1 = n - k1 + if (k2neg) k2 = n - k2 + if (k1 > POW_2_128 || k2 > POW_2_128) { + throw new Error('splitScalar: Endomorphism failed, k=' + k) + } + return { k1neg, k1, k2neg, k2 } + }, + }, + }, + sha256, +) + +// Schnorr signatures are superior to ECDSA from above. Below is Schnorr-specific BIP0340 code. +// https://github.com/bitcoin/bips/blob/master/bip-0340.mediawiki +const _0n = BigInt(0) +/** An object mapping tags to their tagged hash prefix of [SHA256(tag) | SHA256(tag)] */ +const TAGGED_HASH_PREFIXES: { [tag: string]: Uint8Array } = {} +function taggedHash(tag: string, ...messages: Uint8Array[]): Uint8Array { + let tagP = TAGGED_HASH_PREFIXES[tag] + if (tagP === undefined) { + const tagH = sha256(Uint8Array.from(tag, (c) => c.charCodeAt(0))) + tagP = concatBytes(tagH, tagH) + TAGGED_HASH_PREFIXES[tag] = tagP + } + return sha256(concatBytes(tagP, ...messages)) +} + +// ECDSA compact points are 33-byte. Schnorr is 32: we strip first byte 0x02 or 0x03 +const pointToBytes = (point: PointType) => point.toRawBytes(true).slice(1) +const numTo32b = (n: bigint) => numberToBytesBE(n, 32) +const modP = (x: bigint) => mod(x, secp256k1P) +const modN = (x: bigint) => mod(x, secp256k1N) +const Point = secp256k1.ProjectivePoint +const GmulAdd = (Q: PointType, a: bigint, b: bigint) => + Point.BASE.multiplyAndAddUnsafe(Q, a, b) + +// Calculate point, scalar and bytes +function schnorrGetExtPubKey(priv: PrivKey) { + let d_ = secp256k1.utils.normPrivateKeyToScalar(priv) // same method executed in fromPrivateKey + let p = Point.fromPrivateKey(d_) // P = d'⋅G; 0 < d' < n check is done inside + const scalar = p.hasEvenY() ? d_ : modN(-d_) + return { scalar: scalar, bytes: pointToBytes(p) } +} +/** + * lift_x from BIP340. Convert 32-byte x coordinate to elliptic curve point. 
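+ * Picks the candidate with even y (negating y when it is odd), matching BIP340 x-only public keys.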
+ * @returns valid point checked for being on-curve + */ +function lift_x(x: bigint): PointType { + aInRange('x', x, _1n, secp256k1P) // Fail if x ≥ p. + const xx = modP(x * x) + const c = modP(xx * x + BigInt(7)) // Let c = x³ + 7 mod p. + let y = sqrtMod(c) // Let y = c^(p+1)/4 mod p. + if (y % _2n !== _0n) y = modP(-y) // Return the unique point P such that x(P) = x and + const p = new Point(x, y, _1n) // y(P) = y if y mod 2 = 0 or y(P) = p-y otherwise. + p.assertValidity() + return p +} +const num = bytesToNumberBE +/** + * Create tagged hash, convert it to bigint, reduce modulo-n. + */ +function challenge(...args: Uint8Array[]): bigint { + return modN(num(taggedHash('BIP0340/challenge', ...args))) +} + +/** + * Schnorr public key is just `x` coordinate of Point as per BIP340. + */ +function schnorrGetPublicKey(privateKey: Hex): Uint8Array { + return schnorrGetExtPubKey(privateKey).bytes // d'=int(sk). Fail if d'=0 or d'≥n. Ret bytes(d'⋅G) +} + +/** + * Creates Schnorr signature as per BIP340. Verifies itself before returning anything. + * auxRand is optional and is not the sole source of k generation: bad CSPRNG won't be dangerous. + */ +function schnorrSign( + message: Hex, + privateKey: PrivKey, + auxRand: Hex = randomBytes(32), +): Uint8Array { + const m = ensureBytes('message', message) + const { bytes: px, scalar: d } = schnorrGetExtPubKey(privateKey) // checks for isWithinCurveOrder + const a = ensureBytes('auxRand', auxRand, 32) // Auxiliary random data a: a 32-byte array + const t = numTo32b(d ^ num(taggedHash('BIP0340/aux', a))) // Let t be the byte-wise xor of bytes(d) and hash/aux(a) + const rand = taggedHash('BIP0340/nonce', t, px, m) // Let rand = hash/nonce(t || bytes(P) || m) + const k_ = modN(num(rand)) // Let k' = int(rand) mod n + if (k_ === _0n) throw new Error('sign failed: k is zero') // Fail if k' = 0. + const { bytes: rx, scalar: k } = schnorrGetExtPubKey(k_) // Let R = k'⋅G. + const e = challenge(rx, px, m) // Let e = int(hash/challenge(bytes(R) || bytes(P) || m)) mod n. + const sig = new Uint8Array(64) // Let sig = bytes(R) || bytes((k + ed) mod n). + sig.set(rx, 0) + sig.set(numTo32b(modN(k + e * d)), 32) + // If Verify(bytes(P), m, sig) (see below) returns failure, abort + if (!schnorrVerify(sig, m, px)) throw new Error('sign: Invalid signature produced') + return sig +} + +/** + * Verifies Schnorr signature. + * Will swallow errors & return false except for initial type validation of arguments. + */ +function schnorrVerify(signature: Hex, message: Hex, publicKey: Hex): boolean { + const sig = ensureBytes('signature', signature, 64) + const m = ensureBytes('message', message) + const pub = ensureBytes('publicKey', publicKey, 32) + try { + const P = lift_x(num(pub)) // P = lift_x(int(pk)); fail if that fails + const r = num(sig.subarray(0, 32)) // Let r = int(sig[0:32]); fail if r ≥ p. + if (!inRange(r, _1n, secp256k1P)) return false + const s = num(sig.subarray(32, 64)) // Let s = int(sig[32:64]); fail if s ≥ n. + if (!inRange(s, _1n, secp256k1N)) return false + const e = challenge(numTo32b(r), pointToBytes(P), m) // int(challenge(bytes(r)||bytes(P)||m))%n + const R = GmulAdd(P, s, modN(-e)) // R = s⋅G - e⋅P + if (!R || !R.hasEvenY() || R.toAffine().x !== r) return false // -eP == (n-e)P + return true // Fail if is_infinite(R) / not has_even_y(R) / x(R) ≠ r. 
+ } catch (error) { + return false + } +} + +export type SecpSchnorr = { + getPublicKey: typeof schnorrGetPublicKey + sign: typeof schnorrSign + verify: typeof schnorrVerify + utils: { + randomPrivateKey: () => Uint8Array + lift_x: typeof lift_x + pointToBytes: (point: PointType) => Uint8Array + numberToBytesBE: typeof numberToBytesBE + bytesToNumberBE: typeof bytesToNumberBE + taggedHash: typeof taggedHash + mod: typeof mod + } +} +/** + * Schnorr signatures over secp256k1. + * https://github.com/bitcoin/bips/blob/master/bip-0340.mediawiki + * @example + * ```js + * import { schnorr } from '@noble/curves/secp256k1'; + * const priv = schnorr.utils.randomPrivateKey(); + * const pub = schnorr.getPublicKey(priv); + * const msg = new TextEncoder().encode('hello'); + * const sig = schnorr.sign(msg, priv); + * const isValid = schnorr.verify(sig, msg, pub); + * ``` + */ +export const schnorr: SecpSchnorr = /* @__PURE__ */ (() => ({ + getPublicKey: schnorrGetPublicKey, + sign: schnorrSign, + verify: schnorrVerify, + utils: { + randomPrivateKey: secp256k1.utils.randomPrivateKey, + lift_x, + pointToBytes, + numberToBytesBE, + bytesToNumberBE, + taggedHash, + mod, + }, +}))() + +const isoMap = /* @__PURE__ */ (() => + isogenyMap( + Fpk1, + [ + // xNum + [ + '0x8e38e38e38e38e38e38e38e38e38e38e38e38e38e38e38e38e38e38daaaaa8c7', + '0x7d3d4c80bc321d5b9f315cea7fd44c5d595d2fc0bf63b92dfff1044f17c6581', + '0x534c328d23f234e6e2a413deca25caece4506144037c40314ecbd0b53d9dd262', + '0x8e38e38e38e38e38e38e38e38e38e38e38e38e38e38e38e38e38e38daaaaa88c', + ], + // xDen + [ + '0xd35771193d94918a9ca34ccbb7b640dd86cd409542f8487d9fe6b745781eb49b', + '0xedadc6f64383dc1df7c4b2d51b54225406d36b641f5e41bbc52a56612a8c6d14', + '0x0000000000000000000000000000000000000000000000000000000000000001', // LAST 1 + ], + // yNum + [ + '0x4bda12f684bda12f684bda12f684bda12f684bda12f684bda12f684b8e38e23c', + '0xc75e0c32d5cb7c0fa9d0a54b12a0a6d5647ab046d686da6fdffc90fc201d71a3', + '0x29a6194691f91a73715209ef6512e576722830a201be2018a765e85a9ecee931', + '0x2f684bda12f684bda12f684bda12f684bda12f684bda12f684bda12f38e38d84', + ], + // yDen + [ + '0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffefffff93b', + '0x7a06534bb8bdb49fd5e9e6632722c2989467c1bfc8e8d978dfb425d2685c2573', + '0x6484aa716545ca2cf3a70c3fa8fe337e0a3d21162f0d6299a7bf8192bfd2a76f', + '0x0000000000000000000000000000000000000000000000000000000000000001', // LAST 1 + ], + ].map((i) => i.map((j) => BigInt(j))) as [bigint[], bigint[], bigint[], bigint[]], + ))() +const mapSWU = /* @__PURE__ */ (() => + mapToCurveSimpleSWU(Fpk1, { + A: BigInt('0x3f8731abdd661adca08a5558f0f5d272e953d363cb6f0e5d405447c01a444533'), + B: BigInt('1771'), + Z: Fpk1.create(BigInt('-11')), + }))() +/** Hashing / encoding to secp256k1 points / field. RFC 9380 methods. 
*/ +export const secp256k1_hasher: Hasher = /* @__PURE__ */ (() => + createHasher( + secp256k1.ProjectivePoint, + (scalars: bigint[]) => { + const { x, y } = mapSWU(Fpk1.create(scalars[0])) + return isoMap(x, y) + }, + { + DST: 'secp256k1_XMD:SHA-256_SSWU_RO_', + encodeDST: 'secp256k1_XMD:SHA-256_SSWU_NU_', + p: Fpk1.ORDER, + m: 1, + k: 128, + expand: 'xmd', + hash: sha256, + } as const, + ))() + +/** + * @deprecated Use `secp256k1_hasher` + */ +export const hashToCurve: HTFMethod = /* @__PURE__ */ (() => secp256k1_hasher.hashToCurve)() + +/** + * @deprecated Use `secp256k1_hasher` + */ +export const encodeToCurve: HTFMethod = /* @__PURE__ */ (() => + secp256k1_hasher.encodeToCurve)() diff --git a/packages/evm/src/precompiles/bls12_381/noble.ts b/packages/evm/src/precompiles/bls12_381/noble.ts index 7be334127e0..89cd7a3598c 100644 --- a/packages/evm/src/precompiles/bls12_381/noble.ts +++ b/packages/evm/src/precompiles/bls12_381/noble.ts @@ -6,7 +6,7 @@ import { equalsBytes, setLengthLeft, } from '@ethereumjs/util' -import { bls12_381 } from '@noble/curves/bls12-381' +import { bls12_381 } from './nbl/src/bls12-381.ts' import { EVMError, EVMErrorMessages } from '../../errors.ts' @@ -20,9 +20,9 @@ import { BLS_ZERO_BUFFER, } from './constants.ts' -import type { Fp2 } from '@noble/curves/abstract/tower' -import type { AffinePoint } from '@noble/curves/abstract/weierstrass' import type { EVMBLSInterface } from '../../types.ts' +import type { Fp2 } from './nbl/src/abstract/tower.ts' +import type { AffinePoint } from './nbl/src/abstract/weierstrass.ts' const G1_ZERO = bls12_381.G1.ProjectivePoint.ZERO const G2_ZERO = bls12_381.G2.ProjectivePoint.ZERO diff --git a/packages/vm/coverage.sh b/packages/vm/coverage.sh new file mode 100644 index 00000000000..b7edd5a2935 --- /dev/null +++ b/packages/vm/coverage.sh @@ -0,0 +1,4 @@ +npx vitest run ./test/tester/testRunnerVitest \ + --coverage.enabled \ + --coverage.allowExternal \ + && open ./coverage/index.html \ No newline at end of file diff --git a/packages/vm/test/tester/runners/BlockchainTestsRunner.ts b/packages/vm/test/tester/runners/BlockchainTestsRunner.ts index 91557937781..e1b22283e3d 100644 --- a/packages/vm/test/tester/runners/BlockchainTestsRunner.ts +++ b/packages/vm/test/tester/runners/BlockchainTestsRunner.ts @@ -35,13 +35,14 @@ function formatBlockHeader(data: any) { return formatted } -export async function runBlockchainTest(options: any, testData: any, t: tape.Test) { +export async function runBlockchainTest(options: any, testData: any) { // ensure that the test data is the right fork data if (testData.network !== options.forkConfigTestSuite) { - t.comment(`skipping test: no data available for ${options.forkConfigTestSuite}`) return } + console.log('Running test, description: ', testData._info?.description) + // fix for BlockchainTests/GeneralStateTests/stRandom/* testData.lastblockhash = stripHexPrefix(testData.lastblockhash) @@ -71,9 +72,6 @@ export async function runBlockchainTest(options: any, testData: any, t: tape.Tes // Only run with block validation when sealEngine present in test file // and being set to Ethash PoW validation if (testData.sealEngine === 'Ethash') { - if (common.consensusAlgorithm() !== ConsensusAlgorithm.Ethash) { - t.skip('SealEngine setting is not matching chain consensus type, skip test.') - } validatePow = true } @@ -85,7 +83,6 @@ export async function runBlockchainTest(options: any, testData: any, t: tape.Tes if (typeof testData.genesisRLP === 'string') { const rlp = toBytes(testData.genesisRLP) - 
t.deepEquals(genesisBlock.serialize(), rlp, 'correct genesis RLP') } const consensusDict: ConsensusDict = {} @@ -122,19 +119,7 @@ export async function runBlockchainTest(options: any, testData: any, t: tape.Tes // set up pre-state await setupPreConditions(vm.stateManager, testData) - t.deepEquals( - await vm.stateManager.getStateRoot(), - genesisBlock.header.stateRoot, - 'correct pre stateRoot', - ) - - async function handleError(error: string | undefined, expectException: string | boolean) { - if (expectException !== false) { - t.pass(`Expected exception ${expectException}`) - } else { - t.fail(error) - } - } + async function handleError(error: string | undefined, expectException: string | boolean) {} let currentBlock = BigInt(0) for (const raw of testData.blocks) { @@ -185,20 +170,6 @@ export async function runBlockchainTest(options: any, testData: any, t: tape.Tes string >[]) { const shouldFail = txData.valid === 'false' - try { - const txRLP = hexToBytes(txData.rawBytes as PrefixedHexString) - const tx = createTxFromRLP(txRLP, { common }) - await blockBuilder.addTransaction(tx) - if (shouldFail) { - t.fail('tx should fail, but did not fail') - } - } catch (e: any) { - if (!shouldFail) { - t.fail(`tx should not fail, but failed: ${e.message}`) - } else { - t.pass('tx successfully failed') - } - } } await blockBuilder.revert() // will only revert if checkpointed } @@ -262,14 +233,13 @@ export async function runBlockchainTest(options: any, testData: any, t: tape.Tes const headBlock = await (vm.blockchain as Blockchain).getIteratorHead() await vm.stateManager.setStateRoot(headBlock.header.stateRoot) } else { - await verifyPostConditions(stateTree, testData.postState, t) + //await verifyPostConditions(stateTree, testData.postState, t) } throw e } if (expectException !== false) { - t.fail(`expected exception but test did not throw an exception: ${expectException}`) return } } catch (error: any) { @@ -279,16 +249,6 @@ export async function runBlockchainTest(options: any, testData: any, t: tape.Tes } } - t.equal( - bytesToHex(blockchain['_headHeaderHash']), - '0x' + testData.lastblockhash, - 'correct last header block', - ) - - const end = Date.now() - const timeSpent = `${(end - begin) / 1000} secs` - t.comment(`Time: ${timeSpent}`) - // Explicitly delete objects for memory optimization (early GC) common = blockchain = stateTree = stateManager = vm = cacheDB = null as any } diff --git a/packages/vm/test/tester/testRunnerVitest.spec.ts b/packages/vm/test/tester/testRunnerVitest.spec.ts new file mode 100755 index 00000000000..2b4bd27163f --- /dev/null +++ b/packages/vm/test/tester/testRunnerVitest.spec.ts @@ -0,0 +1,310 @@ +import * as path from 'path' +/* eslint-disable no-console */ +import { MCLBLS, NobleBLS, NobleBN254, RustBN254 } from '@ethereumjs/evm' +import { trustedSetup } from '@paulmillr/trusted-setups/fast.js' +import * as mcl from 'mcl-wasm' +import { KZG as microEthKZG } from 'micro-eth-signer/kzg' +import * as minimist from 'minimist' +import * as process from 'process' +import { initRustBN } from 'rustbn-wasm' +import * as tape from 'tape' +import { assert, describe, it } from 'vitest' + +import { + DEFAULT_FORK_CONFIG, + DEFAULT_TESTS_PATH, + getCommon, + getExpectedTests, + getRequiredForkConfigAlias, + getSkipTests, + getTestDirs, +} from './config.js' +import { runBlockchainTest } from './runners/BlockchainTestsRunner.js' +import { runStateTest } from './runners/GeneralStateTestsRunner.js' +import { getTestFromSource, getTestsFromArgs } from './testLoader.js' + +import type 
{ Common } from '@ethereumjs/common' +import type { EVMBLSInterface, EVMBN254Interface } from '@ethereumjs/evm' + +/** + * Test runner + * CLI arguments: + * --state: boolean. Run state tests + * --blockchain: boolean. Run blockchain tests + * --fork: string. Fork to use for these tests + * --skip: string. Comma-separated list of tests to skip. choices of: all,broken,permanent,slow. Defaults to all + * --runSkipped: string. Comma-separated list of tests to skip if --skip is not set. choices of: all,broken,permanent,slow. Defaults to none + * --file: string. Test file to run + * --test: string. Test name to run + * --dir: string. Test directory to look for tests + * --excludeDir: string. Test directory to exclude from testing + * --testsPath: string. Root directory of tests to look (default: '../ethereum-tests') + * --customTestsPath: string. Custom directory to look for tests (e.g. '../../my_custom_test_folder') + * --customStateTest: string. Run a file with a custom state test (not in test directory) + * --jsontrace: boolean. Enable json step tracing in state tests + * --dist: boolean. Use the compiled version of the VM + * --data: number. Only run this state test if the transaction has this calldata + * --gas: number. Only run this state test if the transaction has this gasLimit + * --value: number. Only run this state test if the transaction has this call value + * --debug: boolean. Enable BlockchainTests debugger (compares post state against the expected post state) + * --expected-test-amount: number. If passed, check after tests are ran if at least this amount of tests have passed (inclusive) + * --verify-test-amount-alltests: number. If passed, get the expected amount from tests and verify afterwards if this is the count of tests (expects tests are ran with default settings) + * --reps: number. If passed, each test case will be run the number of times indicated + * --bls: string. BLS library being used, choices: Noble, MCL (default: MCL) + * --bn254: string. BN254 (alt_BN128) library being used, choices: Noble, RustBN (default: RustBN) + * --profile If this flag is passed, the state/blockchain tests will profile + */ + +const argv = { + _: [], + 'stack-size': 1500, + blockchain: true, + fork: 'Prague', + dir: '../fixtures/blockchain_tests/prague/eip2537_bls_12_381_precompiles/bls12_g1add', +} + +describe('VM tests', () => { + it('run all tests', async () => { + async function runTests() { + let name: 'GeneralStateTests' | 'BlockchainTests' + let runner: any + if ((argv.state as boolean) === true) { + name = 'GeneralStateTests' + runner = runStateTest + } else if ((argv.blockchain as boolean) === true) { + name = 'BlockchainTests' + runner = runBlockchainTest + } else { + console.log('TEST', argv) + console.log(`Test type not supported or provided`) + process.exit(1) + } + + const RUN_PROFILER: boolean = argv.profile ?? false + + const FORK_CONFIG: string = argv.fork !== undefined ? 
argv.fork : DEFAULT_FORK_CONFIG + const FORK_CONFIG_TEST_SUITE = getRequiredForkConfigAlias(FORK_CONFIG) + + // Examples: Istanbul -> istanbul, MuirGlacier -> muirGlacier + const FORK_CONFIG_VM = FORK_CONFIG.charAt(0).toLowerCase() + FORK_CONFIG.substring(1) + + /** + * Configuration for getting the tests from the ethereum/tests repository + */ + const testGetterArgs: { + skipTests: string[] + runSkipped: string[] + forkConfig: string + file?: string + test?: string + dir?: string + excludeDir?: string + testsPath?: string + customStateTest?: string + directory?: string + } = { + skipTests: getSkipTests(argv.skip, argv.runSkipped !== undefined ? 'NONE' : 'ALL'), + runSkipped: getSkipTests(argv.runSkipped, 'NONE'), + forkConfig: FORK_CONFIG_TEST_SUITE, + file: argv.file, + test: argv.test, + dir: argv.dir, + excludeDir: argv.excludeDir, + testsPath: argv.testsPath, + customStateTest: argv.customStateTest, + } + + let bls: EVMBLSInterface + if (argv.bls !== undefined && argv.bls.toLowerCase() === 'mcl') { + await mcl.init(mcl.BLS12_381) + bls = new MCLBLS(mcl) + console.log('BLS library used: MCL (WASM)') + } else { + console.log('BLS library used: Noble (JavaScript)') + bls = new NobleBLS() + } + + let bn254: EVMBN254Interface + if (argv.bn254 !== undefined && argv.bn254.toLowerCase() === 'mcl') { + const rustBN = await initRustBN() + bn254 = new RustBN254(rustBN) + console.log('BN254 (alt_BN128) library used: rustbn.js (WASM)') + } else { + console.log('BN254 (alt_BN128) library used: Noble (JavaScript)') + bn254 = new NobleBN254() + } + + /** + * Run-time configuration + */ + const kzg = new microEthKZG(trustedSetup) + const runnerArgs: { + forkConfigVM: string + forkConfigTestSuite: string + common: Common + jsontrace?: boolean + dist?: boolean + data?: number + gasLimit?: number + value?: number + debug?: boolean + reps?: number + profile: boolean + bls: EVMBLSInterface + bn254: EVMBN254Interface + stateManager: string + } = { + forkConfigVM: FORK_CONFIG_VM, + forkConfigTestSuite: FORK_CONFIG_TEST_SUITE, + common: getCommon(FORK_CONFIG_VM, kzg), + jsontrace: argv.jsontrace, + dist: argv.dist, + data: argv.data, // GeneralStateTests + gasLimit: argv.gas, // GeneralStateTests + value: argv.value, // GeneralStateTests + debug: argv.debug, // BlockchainTests + reps: argv.reps, // test repetitions + bls, + profile: RUN_PROFILER, + bn254, + stateManager: argv.stateManager, + } + + /** + * Modify the forkConfig string to ensure it works with RegEx (escape `+` characters) + */ + if (testGetterArgs.forkConfig.includes('+')) { + let str = testGetterArgs.forkConfig + const indices = [] + for (let i = 0; i < str.length; i++) { + if (str[i] === '+') { + indices.push(i) + } + } + // traverse array in reverse order to ensure indices match when we replace the '+' with '/+' + for (let i = indices.length - 1; i >= 0; i--) { + str = `${str.substr(0, indices[i])}\\${str.substr(indices[i])}` + } + testGetterArgs.forkConfig = str + } + + const expectedTests: number | undefined = + argv['verify-test-amount-alltests'] > 0 + ? getExpectedTests(FORK_CONFIG_VM, name) + : argv['expected-test-amount'] !== undefined && argv['expected-test-amount'] > 0 + ? 
argv['expected-test-amount'] + : undefined + + /** + * Initialization output to console + */ + const width = 50 + const fillWidth = width + const fillParam = 20 + const delimiter = `| `.padEnd(fillWidth) + ' |' + const formatArgs = (args: any) => { + return Object.assign( + {}, + ...Object.entries(args) + .filter(([_k, v]) => typeof v === 'string' || (Array.isArray(v) && v.length !== 0)) + .map(([k, v]) => ({ + [k]: Array.isArray(v) && v.length > 0 ? v.length : v, + })), + ) + } + const formattedGetterArgs = formatArgs(testGetterArgs) + const formattedRunnerArgs = formatArgs(runnerArgs) + + console.log(`+${'-'.repeat(width)}+`) + console.log(`| VM -> ${name} `.padEnd(fillWidth) + ' |') + console.log(delimiter) + console.log(`| TestGetterArgs`.padEnd(fillWidth) + ' |') + for (const [key, value] of Object.entries(formattedGetterArgs)) { + console.log(`| ${key.padEnd(fillParam)}: ${value}`.padEnd(fillWidth) + ' |') + } + console.log(delimiter) + console.log(`| RunnerArgs`.padEnd(fillWidth) + ' |') + for (const [key, value] of Object.entries(formattedRunnerArgs)) { + if (key === 'common') { + const hf = (value as Common).hardfork() + console.log(`| ${key.padEnd(fillParam)}: ${hf}`.padEnd(fillWidth) + ' |') + } else { + console.log(`| ${key.padEnd(fillParam)}: ${value}`.padEnd(fillWidth) + ' |') + } + } + console.log(`+${'-'.repeat(width)}+`) + console.log() + + if (argv.customStateTest !== undefined) { + const fileName: string = argv.customStateTest + //@ts-ignore tsx/esbuild can't figure out this namespace import thing but it works fine :shrug: + tape(name, (t) => { + getTestFromSource(fileName, async (err: string | null, test: any) => { + if (err !== null) { + return t.fail(err) + } + t.comment(`file: ${fileName} test: ${test.testName}`) + await runStateTest(runnerArgs, test, t) + t.end() + }) + }) + } else { + let testIdentifier: string + const failingTests: Record = {} + // Tests for HFs before Istanbul have been moved under `LegacyTests/Constantinople`: + // https://github.com/ethereum/tests/releases/tag/v7.0.0-beta.1 + + const dirs = getTestDirs(FORK_CONFIG_VM, name) + console.time('Total (including setup)') + for (const dir of dirs) { + await new Promise((resolve, reject) => { + if (argv.customTestsPath !== undefined) { + testGetterArgs.directory = argv.customTestsPath as string + } else { + const testDir = testGetterArgs.dir ?? '' + const testsPath = testGetterArgs.testsPath ?? 
DEFAULT_TESTS_PATH + testGetterArgs.directory = path.join(testsPath, dir, testDir) + } + console.log('default method call') + getTestsFromArgs( + dir, + async (fileName: string, subDir: string, testName: string, test: any) => { + const runSkipped = testGetterArgs.runSkipped + const inRunSkipped = runSkipped.includes(fileName) + if (runSkipped.length === 0 || inRunSkipped === true) { + testIdentifier = `file: ${subDir} test: ${testName}` + await runner(runnerArgs, test) + } + }, + testGetterArgs, + ) + .then(() => { + resolve() + }) + .catch((error: string) => { + reject() + }) + }) + } + + for (const failingTestIdentifier in failingTests) { + console.log(`Errors thrown in ${failingTestIdentifier}:`) + const errors = failingTests[failingTestIdentifier] as string[] + for (let i = 0; i < errors.length; i++) { + console.log('\t' + errors[i]) + } + } + + if (expectedTests !== undefined) { + const { assertCount } = t as any + t.ok(assertCount >= expectedTests, `expected ${expectedTests} checks, got ${assertCount}`) + } + + console.log() + console.timeEnd('Total (including setup)') + } + } + + await runTests() // eslint-disable-line @typescript-eslint/no-floating-promises + }, 0) +}, 0) diff --git a/packages/vm/vitest.config.ts b/packages/vm/vitest.config.ts index 5f639465bc4..604f5586aaf 100644 --- a/packages/vm/vitest.config.ts +++ b/packages/vm/vitest.config.ts @@ -5,9 +5,22 @@ import baseConfig from '../../config/vitest.config.mts' export default mergeConfig( baseConfig, defineConfig({ + //root: '../..', plugins: [topLevelAwait()], optimizeDeps: { - exclude: ['kzg-wasm'], + exclude: ['kzg-wasm', '@noble/curves'], + }, + ssr: { + noExternal: ['@noble/curves'], + }, + test: { + coverage: { + enabled: true, + allowExternal: true, + include: ['**/packages/*/src/**/*.{ts,js}', '**/packages/noble-curves/**/*.{ts,js}'], + exclude: ['**/packages/ethereum-tests'], + reporter: ['text', 'html'], + }, }, }), )
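A minimal sketch (not part of the diff) of how the relocated `bls12_381` build can be exercised, mirroring the `ProjectivePoint` usage in `noble.ts` after the import swap. The relative import path and the standalone-script setting are assumptions for illustration; only the `G1.ProjectivePoint` API shown is taken from the code above.

```ts
// Hedged sketch: drive the vendored bls12-381 sources directly, the way noble.ts does.
// The import path below is an assumption based on the layout introduced in this diff.
import { bls12_381 } from './packages/evm/src/precompiles/bls12_381/nbl/src/bls12-381.ts'

const { ProjectivePoint } = bls12_381.G1

// G1 point addition, the operation behind the BLS12_G1ADD precompile.
const doubled = ProjectivePoint.BASE.add(ProjectivePoint.BASE)
console.log(doubled.toAffine()) // affine coordinates, as used when re-encoding precompile output

// The identity element corresponds to the G1_ZERO checks in noble.ts.
console.log(ProjectivePoint.ZERO.equals(ProjectivePoint.ZERO)) // true
```

Importing the curve code as TypeScript sources rather than the published `@noble/curves` package is presumably what allows the vitest coverage settings above (`allowExternal` plus the package-wide `include` patterns) to instrument it during the blockchain-test run.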