diff --git a/.all-contributorsrc b/.github/.all-contributorsrc similarity index 100% rename from .all-contributorsrc rename to .github/.all-contributorsrc diff --git a/archive_tmp/bls12_381/g1.cairo b/archive_tmp/bls12_381/g1.cairo deleted file mode 100644 index 6029f233..00000000 --- a/archive_tmp/bls12_381/g1.cairo +++ /dev/null @@ -1,516 +0,0 @@ -from src.bls12_381.fq import ( - is_zero, - verify_zero7, - fq_bigint4, - BigInt4, - UnreducedBigInt7, - bigint4_mul, -) -from src.bls12_381.curve import P0, P1, P2, P3, BASE, N_LIMBS, DEGREE -from starkware.cairo.common.registers import get_fp_and_pc - -// Represents a point on the elliptic curve. -// The zero point is represented using pt.x=0, as there is no point on the curve with this x value. -struct G1Point { - x: BigInt4*, - y: BigInt4*, -} -struct G1PointFull { - x: BigInt4, - y: BigInt4, -} - -namespace g1 { - func get_g1_generator() -> G1Point* { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - local x: BigInt4 = BigInt4( - 77209383603911340680728987323, - 49921657856232494206459177023, - 24654436777218005952848247045, - 7410505851925769877053596556 - ); - local y: BigInt4 = BigInt4( - 50301641395870356052675782625, - 264871839152097495342696260, - 35935975898704859035952220918, - 2693432453738686426327691501 - ); - local res = G1Point(&x, &y); - return &res; - } - func assert_on_curve{range_check_ptr}(pt: G1Point) -> () { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - - let left = fq_bigint4.mul(pt.y, pt.y); - let x_sq = fq_bigint4.mul(pt.x, pt.x); - let x_cube = fq_bigint4.mul(x_sq, pt.x); - - assert left.d0 = x_cube.d0 + 4; - assert left.d1 = x_cube.d1; - assert left.d2 = x_cube.d2; - assert left.d3 = x_cube.d3; - - return (); - } - func assert_equal(pt1: G1Point*, pt2: G1Point*) -> () { - assert pt1.x.d0 = pt2.x.d0; - assert pt1.x.d1 = pt2.x.d1; - assert pt1.x.d2 = pt2.x.d2; - assert pt1.x.d3 = pt2.x.d3; - assert pt1.y.d0 = pt2.y.d0; - assert pt1.y.d1 = pt2.y.d1; - assert pt1.y.d2 = pt2.y.d2; - assert pt1.y.d3 = pt2.y.d3; - return (); - } - func compute_doubling_slope{range_check_ptr}(pt: G1PointFull) -> (slope: BigInt4) { - // Note that y cannot be zero: assume that it is, then pt = -pt, so 2 * pt = 0, which - // contradicts the fact that the size of the curve is odd. - alloc_locals; - local slope: BigInt4; - %{ - from starkware.python.math_utils import div_mod - - assert 1 < ids.N_LIMBS <= 12 - assert ids.DEGREE == ids.N_LIMBS-1 - x,y,p=0,0,0 - - def split(x, degree=ids.DEGREE, base=ids.BASE): - coeffs = [] - for n in range(degree, 0, -1): - q, r = divmod(x, base ** n) - coeffs.append(q) - x = r - coeffs.append(x) - return coeffs[::-1] - - for i in range(ids.N_LIMBS): - x+=getattr(ids.pt.x, 'd'+str(i)) * ids.BASE**i - y+=getattr(ids.pt.y, 'd'+str(i)) * ids.BASE**i - p+=getattr(ids, 'P'+str(i)) * ids.BASE**i - - slope = split(div_mod(3 * x ** 2, 2 * y, p)) - - for i in range(ids.N_LIMBS): - setattr(ids.slope, 'd'+str(i), slope[i]) - %} - - let (x_sqr: UnreducedBigInt7) = bigint4_mul(pt.x, pt.x); - let (slope_y: UnreducedBigInt7) = bigint4_mul(slope, pt.y); - - verify_zero7( - UnreducedBigInt7( - d0=3 * x_sqr.d0 - 2 * slope_y.d0, - d1=3 * x_sqr.d1 - 2 * slope_y.d1, - d2=3 * x_sqr.d2 - 2 * slope_y.d2, - d3=3 * x_sqr.d3 - 2 * slope_y.d3, - d4=3 * x_sqr.d4 - 2 * slope_y.d4, - d5=3 * x_sqr.d5 - 2 * slope_y.d5, - d6=3 * x_sqr.d6 - 2 * slope_y.d6, - ), - ); - - return (slope=slope); - } - - // Returns the slope of the line connecting the two given points. - // The slope is used to compute pt0 + pt1. 
- // Assumption: pt0.x != pt1.x (mod field prime). - func compute_slope{range_check_ptr}(pt0: G1PointFull, pt1: G1PointFull) -> (slope: BigInt4) { - alloc_locals; - local slope: BigInt4; - %{ - from starkware.python.math_utils import div_mod - - assert 1 < ids.N_LIMBS <= 12 - assert ids.DEGREE == ids.N_LIMBS-1 - x0,y0,x1,y1,p=0,0,0,0,0 - - def split(x, degree=ids.DEGREE, base=ids.BASE): - coeffs = [] - for n in range(degree, 0, -1): - q, r = divmod(x, base ** n) - coeffs.append(q) - x = r - coeffs.append(x) - return coeffs[::-1] - - for i in range(ids.N_LIMBS): - x0+=getattr(ids.pt0.x, 'd'+str(i)) * ids.BASE**i - y0+=getattr(ids.pt0.y, 'd'+str(i)) * ids.BASE**i - x1+=getattr(ids.pt1.x, 'd'+str(i)) * ids.BASE**i - y1+=getattr(ids.pt1.y, 'd'+str(i)) * ids.BASE**i - p+=getattr(ids, 'P'+str(i)) * ids.BASE**i - - slope = split(div_mod(y0 - y1, x0 - x1, p)) - - for i in range(ids.N_LIMBS): - setattr(ids.slope, 'd'+str(i), slope[i]) - %} - - let x_diff = BigInt4( - d0=pt0.x.d0 - pt1.x.d0, - d1=pt0.x.d1 - pt1.x.d1, - d2=pt0.x.d2 - pt1.x.d2, - d3=pt0.x.d3 - pt1.x.d3, - ); - let (x_diff_slope: UnreducedBigInt7) = bigint4_mul(x_diff, slope); - - verify_zero7( - UnreducedBigInt7( - d0=x_diff_slope.d0 - pt0.y.d0 + pt1.y.d0, - d1=x_diff_slope.d1 - pt0.y.d1 + pt1.y.d1, - d2=x_diff_slope.d2 - pt0.y.d2 + pt1.y.d2, - d3=x_diff_slope.d3 - pt0.y.d3 + pt1.y.d3, - d4=x_diff_slope.d4, - d5=x_diff_slope.d5, - d6=x_diff_slope.d6, - ), - ); - - return (slope,); - } - - // Given a point 'pt' on the elliptic curve, computes pt + pt. - func double{range_check_ptr}(pt: G1PointFull) -> (res: G1PointFull) { - alloc_locals; - if (pt.x.d0 == 0) { - if (pt.x.d1 == 0) { - if (pt.x.d2 == 0) { - return (pt,); - } - } - } - - let (slope: BigInt4) = compute_doubling_slope(pt); - let (slope_sqr: UnreducedBigInt7) = bigint4_mul(slope, slope); - local new_x: BigInt4; - local new_y: BigInt4; - %{ - assert 1 < ids.N_LIMBS <= 12 - assert ids.DEGREE == ids.N_LIMBS-1 - x,y,slope,p=0,0,0,0 - - def split(x, degree=ids.DEGREE, base=ids.BASE): - coeffs = [] - for n in range(degree, 0, -1): - q, r = divmod(x, base ** n) - coeffs.append(q) - x = r - coeffs.append(x) - return coeffs[::-1] - - for i in range(ids.N_LIMBS): - x+=getattr(ids.pt.x, 'd'+str(i)) * ids.BASE**i - y+=getattr(ids.pt.y, 'd'+str(i)) * ids.BASE**i - slope+=getattr(ids.slope, 'd'+str(i)) * ids.BASE**i - p+=getattr(ids, 'P'+str(i)) * ids.BASE**i - - new_x = (pow(slope, 2, P) - 2 * x) % p - new_y = (slope * (x - new_x) - y) % p - new_xs, new_ys = split(new_x), split(new_y) - - for i in range(ids.N_LIMBS): - setattr(ids.new_x, 'd'+str(i), new_xs[i]) - setattr(ids.new_y, 'd'+str(i), new_ys[i]) - %} - - verify_zero7( - UnreducedBigInt7( - d0=slope_sqr.d0 - new_x.d0 - 2 * pt.x.d0, - d1=slope_sqr.d1 - new_x.d1 - 2 * pt.x.d1, - d2=slope_sqr.d2 - new_x.d2 - 2 * pt.x.d2, - d3=slope_sqr.d3 - new_x.d3 - 2 * pt.x.d3, - d4=slope_sqr.d4, - d5=slope_sqr.d5, - d6=slope_sqr.d6, - ), - ); - - let (x_diff_slope: UnreducedBigInt7) = bigint4_mul( - BigInt4( - d0=pt.x.d0 - new_x.d0, - d1=pt.x.d1 - new_x.d1, - d2=pt.x.d2 - new_x.d2, - d3=pt.x.d3 - new_x.d3, - ), - slope, - ); - - verify_zero7( - UnreducedBigInt7( - d0=x_diff_slope.d0 - pt.y.d0 - new_y.d0, - d1=x_diff_slope.d1 - pt.y.d1 - new_y.d1, - d2=x_diff_slope.d2 - pt.y.d2 - new_y.d2, - d3=x_diff_slope.d3 - pt.y.d3 - new_y.d3, - d4=x_diff_slope.d4, - d5=x_diff_slope.d5, - d6=x_diff_slope.d6, - ), - ); - - return (G1PointFull(new_x, new_y),); - } - - // Adds two points on the elliptic curve. 
- // Assumption: pt0.x != pt1.x (however, pt0 = pt1 = 0 is allowed). - // Note that this means that the function cannot be used if pt0 = pt1 - // (use ec_double() in this case) or pt0 = -pt1 (the result is 0 in this case). - func fast_ec_add{range_check_ptr}(pt0: G1PointFull, pt1: G1PointFull) -> (res: G1PointFull) { - alloc_locals; - if (pt0.x.d0 == 0) { - if (pt0.x.d1 == 0) { - if (pt0.x.d2 == 0) { - return (pt1,); - } - } - } - if (pt1.x.d0 == 0) { - if (pt1.x.d1 == 0) { - if (pt1.x.d2 == 0) { - return (pt0,); - } - } - } - - let (slope: BigInt4) = compute_slope(pt0, pt1); - let (slope_sqr: UnreducedBigInt7) = bigint4_mul(slope, slope); - local new_x: BigInt4; - local new_y: BigInt4; - %{ - assert 1 < ids.N_LIMBS <= 12 - assert ids.DEGREE == ids.N_LIMBS-1 - x0,y0,x1,slope,p=0,0,0,0,0 - - def split(x, degree=ids.DEGREE, base=ids.BASE): - coeffs = [] - for n in range(degree, 0, -1): - q, r = divmod(x, base ** n) - coeffs.append(q) - x = r - coeffs.append(x) - return coeffs[::-1] - - for i in range(ids.N_LIMBS): - x0+=getattr(ids.pt0.x, 'd'+str(i)) * ids.BASE**i - y0+=getattr(ids.pt0.y, 'd'+str(i)) * ids.BASE**i - x1+=getattr(ids.pt1.x, 'd'+str(i)) * ids.BASE**i - slope+=getattr(ids.slope, 'd'+str(i)) * ids.BASE**i - p+=getattr(ids, 'P'+str(i)) * ids.BASE**i - - - new_x = (pow(slope, 2, P) - x0 - x1) % p - new_y = (slope * (x0 - new_x) - y0) % p - new_xs, new_ys = split(new_x), split(new_y) - - for i in range(ids.N_LIMBS): - setattr(ids.new_x, 'd'+str(i), new_xs[i]) - setattr(ids.new_y, 'd'+str(i), new_ys[i]) - %} - - verify_zero7( - UnreducedBigInt7( - d0=slope_sqr.d0 - new_x.d0 - pt0.x.d0 - pt1.x.d0, - d1=slope_sqr.d1 - new_x.d1 - pt0.x.d1 - pt1.x.d1, - d2=slope_sqr.d2 - new_x.d2 - pt0.x.d2 - pt1.x.d2, - d3=slope_sqr.d3 - new_x.d3 - pt0.x.d3 - pt1.x.d3, - d4=slope_sqr.d4, - d5=slope_sqr.d5, - d6=slope_sqr.d6, - ), - ); - - let (x_diff_slope: UnreducedBigInt7) = bigint4_mul( - BigInt4( - d0=pt0.x.d0 - new_x.d0, - d1=pt0.x.d1 - new_x.d1, - d2=pt0.x.d2 - new_x.d2, - d3=pt0.x.d3 - new_x.d3, - ), - slope, - ); - - verify_zero7( - UnreducedBigInt7( - d0=x_diff_slope.d0 - pt0.y.d0 - new_y.d0, - d1=x_diff_slope.d1 - pt0.y.d1 - new_y.d1, - d2=x_diff_slope.d2 - pt0.y.d2 - new_y.d2, - d3=x_diff_slope.d3 - pt0.y.d3 - new_y.d3, - d4=x_diff_slope.d4, - d5=x_diff_slope.d5, - d6=x_diff_slope.d6, - ), - ); - - return (G1PointFull(new_x, new_y),); - } - - // Same as fast_ec_add, except that the cases pt0 = ±pt1 are supported. - func add_full{range_check_ptr}(pt0: G1PointFull, pt1: G1PointFull) -> (res: G1PointFull) { - let x_diff = BigInt4( - d0=pt0.x.d0 - pt1.x.d0, - d1=pt0.x.d1 - pt1.x.d1, - d2=pt0.x.d2 - pt1.x.d2, - d3=pt0.x.d3 - pt1.x.d3, - ); - let (same_x: felt) = is_zero(x_diff); - if (same_x == 0) { - // pt0.x != pt1.x so we can use fast_ec_add. - return fast_ec_add(pt0, pt1); - } - - // We have pt0.x = pt1.x. This implies pt0.y = ±pt1.y. - // Check whether pt0.y = -pt1.y. - let y_sum = BigInt4( - d0=pt0.y.d0 + pt1.y.d0, - d1=pt0.y.d1 + pt1.y.d1, - d2=pt0.y.d2 + pt1.y.d2, - d3=pt0.y.d3 + pt1.y.d3, - ); - let (opposite_y: felt) = is_zero(y_sum); - if (opposite_y != 0) { - // pt0.y = -pt1.y. - // Note that the case pt0 = pt1 = 0 falls into this branch as well. - let ZERO_POINT = G1PointFull(BigInt4(0, 0, 0, 0), BigInt4(0, 0, 0, 0)); - return (ZERO_POINT,); - } else { - // pt0.y = pt1.y. 
- return double(pt0); - } - } - func add{range_check_ptr}(pt0_ptr: G1Point*, pt1_ptr: G1Point*) -> (res: G1Point*) { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - - local pt0: G1PointFull; - assert pt0.x.d0 = pt0_ptr.x.d0; - assert pt0.x.d1 = pt0_ptr.x.d1; - assert pt0.x.d2 = pt0_ptr.x.d2; - assert pt0.x.d3 = pt0_ptr.x.d3; - assert pt0.y.d0 = pt0_ptr.y.d0; - assert pt0.y.d1 = pt0_ptr.y.d1; - assert pt0.y.d2 = pt0_ptr.y.d2; - assert pt0.y.d3 = pt0_ptr.y.d3; - - local pt1: G1PointFull; - assert pt1.x.d0 = pt1_ptr.x.d0; - assert pt1.x.d1 = pt1_ptr.x.d1; - assert pt1.x.d2 = pt1_ptr.x.d2; - assert pt1.x.d3 = pt1_ptr.x.d3; - assert pt1.y.d0 = pt1_ptr.y.d0; - assert pt1.y.d1 = pt1_ptr.y.d1; - assert pt1.y.d2 = pt1_ptr.y.d2; - assert pt1.y.d3 = pt1_ptr.y.d3; - - let x_diff = BigInt4( - d0=pt0.x.d0 - pt1.x.d0, - d1=pt0.x.d1 - pt1.x.d1, - d2=pt0.x.d2 - pt1.x.d2, - d3=pt0.x.d3 - pt1.x.d3, - ); - let (same_x: felt) = is_zero(x_diff); - if (same_x == 0) { - // pt0.x != pt1.x so we can use fast_ec_add. - let (local res) = fast_ec_add(pt0, pt1); - tempvar res_ptr = new G1Point(&res.x, &res.y); - return (res_ptr,); - } - - // We have pt0.x = pt1.x. This implies pt0.y = ±pt1.y. - // Check whether pt0.y = -pt1.y. - let y_sum = BigInt4( - d0=pt0.y.d0 + pt1.y.d0, - d1=pt0.y.d1 + pt1.y.d1, - d2=pt0.y.d2 + pt1.y.d2, - d3=pt0.y.d3 + pt1.y.d3, - ); - let (opposite_y: felt) = is_zero(y_sum); - if (opposite_y != 0) { - // pt0.y = -pt1.y. - // Note that the case pt0 = pt1 = 0 falls into this branch as well. - tempvar ZERO_POINT = new G1Point(new BigInt4(0, 0, 0, 0), new BigInt4(0, 0, 0, 0)); - return (ZERO_POINT,); - } else { - // pt0.y = pt1.y. - let (local res) = double(pt0); - tempvar res_ptr = new G1Point(&res.x, &res.y); - return (res_ptr,); - } - } - // Given 0 <= m < 250, a scalar and a point on the elliptic curve, pt, - // verifies that 0 <= scalar < 2**m and returns (2**m * pt, scalar * pt). - func ec_mul_inner{range_check_ptr}(pt: G1PointFull, scalar: felt, m: felt) -> ( - pow2: G1PointFull, res: G1PointFull - ) { - alloc_locals; - - if (m == 0) { - assert scalar = 0; - let ZERO_POINT = G1PointFull(BigInt4(0, 0, 0, 0), BigInt4(0, 0, 0, 0)); - return (pow2=pt, res=ZERO_POINT); - } - - let (double_pt: G1PointFull) = double(pt); - %{ memory[ap] = (ids.scalar % PRIME) % 2 %} - jmp odd if [ap] != 0, ap++; - return ec_mul_inner(pt=double_pt, scalar=scalar / 2, m=m - 1); - - odd: - let (local inner_pow2: G1PointFull, inner_res: G1PointFull) = ec_mul_inner( - pt=double_pt, scalar=(scalar - 1) / 2, m=m - 1 - ); - // Here inner_res = (scalar - 1) / 2 * double_pt = (scalar - 1) * pt. - // Assume pt != 0 and that inner_res = ±pt. We obtain (scalar - 1) * pt = ±pt => - // scalar - 1 = ±1 (mod N) => scalar = 0 or 2. - // In both cases (scalar - 1) / 2 cannot be in the range [0, 2**(m-1)), so we get a - // contradiction. 
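The fast_ec_add applied just below (like compute_slope, double and add_full above) follows one pattern: a Python hint computes the new point using a modular inverse, and verify_zero7 then checks the defining relations without any field division. A minimal Python sketch of that pattern for affine addition, with illustrative names and an explicit prime p (a model of the relations being enforced, not the Cairo API):

def affine_add(x0, y0, x1, y1, p):
    # Fast path: requires x0 != x1 (mod p), exactly as fast_ec_add assumes.
    assert (x0 - x1) % p != 0
    # "Hint" side: slope and new coordinates computed with a modular inverse.
    slope = (y0 - y1) * pow(x0 - x1, -1, p) % p
    new_x = (slope * slope - x0 - x1) % p
    new_y = (slope * (x0 - new_x) - y0) % p
    # "Verifier" side: the same relations re-checked without division,
    # mirroring the verify_zero7 calls in compute_slope and fast_ec_add.
    assert (slope * (x0 - x1) - (y0 - y1)) % p == 0
    assert (slope * slope - new_x - x0 - x1) % p == 0
    assert (slope * (x0 - new_x) - y0 - new_y) % p == 0
    return new_x, new_y

# Toy check on y^2 = x^3 + 4 over F_23: (2, 9) + (3, 10) = (19, 20).
assert affine_add(2, 9, 3, 10, 23) == (19, 20)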
- let (res: G1PointFull) = fast_ec_add(pt0=pt, pt1=inner_res); - return (pow2=inner_pow2, res=res); - } - // Note : BigInt4 is smaller than curve order which is 255 bits, so d3=0 - func scalar_mul{range_check_ptr}(pt: G1Point*, scalar: BigInt4) -> (res: G1Point*) { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - local pt_full: G1PointFull; - assert pt_full.x.d0 = pt.x.d0; - assert pt_full.x.d1 = pt.x.d1; - assert pt_full.x.d2 = pt.x.d2; - assert pt_full.x.d3 = pt.x.d3; - assert pt_full.y.d0 = pt.y.d0; - assert pt_full.y.d1 = pt.y.d1; - assert pt_full.y.d2 = pt.y.d2; - assert pt_full.y.d3 = pt.y.d3; - - let (pow2_0: G1PointFull, local res0: G1PointFull) = ec_mul_inner(pt_full, scalar.d0, 96); - %{ - print('first limb') - print_G1(ids.pow2_0) - print_G1(ids.res0) - %} - let (pow2_1: G1PointFull, local res1: G1PointFull) = ec_mul_inner(pow2_0, scalar.d1, 96); - %{ - print('second limb') - print_G1(ids.pow2_1) - print_G1(ids.res1) - %} - let (_, local res2: G1PointFull) = ec_mul_inner(pow2_1, scalar.d2, 63); - %{ - print('third limb') - print_G1(ids.res2) - %} - let (res: G1PointFull) = add_full(res0, res1); - let (local res: G1PointFull) = add_full(res, res2); - - tempvar result = new G1Point(&res.x, &res.y); - return (result,); - } - func neg{range_check_ptr}(pt: G1Point*) -> G1Point* { - alloc_locals; - let x = pt.x; - let y = fq_bigint4.neg(pt.y); - tempvar res: G1Point* = new G1Point(x, y); - return res; - } -} diff --git a/archive_tmp/bls12_381/g2.cairo b/archive_tmp/bls12_381/g2.cairo deleted file mode 100644 index b1eceec0..00000000 --- a/archive_tmp/bls12_381/g2.cairo +++ /dev/null @@ -1,786 +0,0 @@ -from starkware.cairo.common.registers import get_fp_and_pc -from src.bls12_381.curve import P0, P1, P2, P3, BASE, N_LIMBS, DEGREE -from src.bls12_381.fq import ( - fq_bigint4, - is_zero, - verify_zero7, - verify_zero4, - bigint4_mul, - bigint4_sq, - BigInt4, - UnreducedBigInt7, -) -from src.bls12_381.towers.e2 import e2, E2, mul_e2_unreduced, square_e2_unreduced - -struct G2Point { - x: E2*, - y: E2*, -} - -struct E4 { - r0: E2*, - r1: E2*, -} -namespace g2 { - func get_g2_generator() -> G2Point* { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - local x: E2 = E2( - BigInt4( - 52000413785700509085167893944, - 55805278558791767872231965478, - 14165060894806320894179293954, - 709198854518927808499549479 - ), - BigInt4( - 6059577009407902906031811454, - 56280794141317933024253112594, - 42137484379184671317244818970, - 6151219408786132332018717600 - ) - ); - local y: E2 = E2( - BigInt4( - 18468318387261666668303624193, - 33814384361831584385596049868, - 67523593506331784026160747431, - 3991670722281632128876268998 - ), - BigInt4( - 28758067327843966261007448510, - 11901358394468272659349572903, - 13543046277261225735433446319, - 1865092152517166129870590640 - ) - ); - local res = G2Point(&x, &y); - return &res; - } - func assert_on_curve{range_check_ptr}(pt: G2Point) -> () { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - - let left = e2.mul(pt.y, pt.y); - let x_sq = e2.square(pt.x); - let x_cube = e2.mul(x_sq, pt.x); - local b20: BigInt4 = BigInt4(d0=4, d1=0, d2=0, d3=0); - local b21: BigInt4 = BigInt4(d0=4, d1=0, d2=0, d3=0); - - local b2: E2 = E2(&b20, &b21); - let right = e2.add(x_cube, &b2); - - e2.assert_E2(left, right); - return (); - } - func assert_equal(pt1: G2Point*, pt2: G2Point*) -> () { - assert pt1.x.a0.d0 = pt2.x.a0.d0; - assert pt1.x.a0.d1 = pt2.x.a0.d1; - assert pt1.x.a0.d2 = pt2.x.a0.d2; - assert pt1.x.a0.d3 = pt2.x.a0.d3; - assert pt1.x.a1.d0 = 
pt2.x.a1.d0; - assert pt1.x.a1.d1 = pt2.x.a1.d1; - assert pt1.x.a1.d2 = pt2.x.a1.d2; - assert pt1.x.a1.d3 = pt2.x.a1.d3; - assert pt1.y.a0.d0 = pt2.y.a0.d0; - assert pt1.y.a0.d1 = pt2.y.a0.d1; - assert pt1.y.a0.d2 = pt2.y.a0.d2; - assert pt1.y.a0.d3 = pt2.y.a0.d3; - assert pt1.y.a1.d0 = pt2.y.a1.d0; - assert pt1.y.a1.d1 = pt2.y.a1.d1; - assert pt1.y.a1.d2 = pt2.y.a1.d2; - assert pt1.y.a1.d3 = pt2.y.a1.d3; - - return (); - } - func neg{range_check_ptr}(pt: G2Point*) -> G2Point* { - alloc_locals; - let x = pt.x; - let y = e2.neg(pt.y); - tempvar res = new G2Point(x, y); - return res; - } - func compute_doubling_slope{range_check_ptr}(pt: G2Point*) -> E2* { - // Returns the slope of the elliptic curve at the given point. - // The slope is used to compute pt + pt. - // Assumption: pt != 0. - // Note that y cannot be zero: assume that it is, then pt = -pt, so 2 * pt = 0, which - // contradicts the fact that the size of the curve is odd. - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - - local slope_a0: BigInt4; - local slope_a1: BigInt4; - %{ - assert 1 < ids.N_LIMBS <= 12 - assert ids.DEGREE == ids.N_LIMBS-1 - x,y,p=[0,0],[0,0],0 - - def split(x, degree=ids.DEGREE, base=ids.BASE): - coeffs = [] - for n in range(degree, 0, -1): - q, r = divmod(x, base ** n) - coeffs.append(q) - x = r - coeffs.append(x) - return coeffs[::-1] - - for i in range(ids.N_LIMBS): - x[0]+=getattr(ids.pt.x.a0, 'd'+str(i)) * ids.BASE**i - x[1]+=getattr(ids.pt.x.a1, 'd'+str(i)) * ids.BASE**i - y[0]+=getattr(ids.pt.y.a0, 'd'+str(i)) * ids.BASE**i - y[1]+=getattr(ids.pt.y.a1, 'd'+str(i)) * ids.BASE**i - - p+=getattr(ids, 'P'+str(i)) * ids.BASE**i - - def mul_e2(x:(int,int), y:(int,int)): - a = (x[0] + x[1]) * (y[0] + y[1]) % p - b, c = x[0]*y[0] % p, x[1]*y[1] % p - return (b - c) % p, (a - b - c) % p - def scalar_mul_e2(n:int, y:(int, int)): - a = (y[0] + y[1]) * n % p - b = y[0]*n % p - return (b, (a - b) % p) - def inv_e2(a:(int, int)): - t0, t1 = (a[0] * a[0] % p, a[1] * a[1] % p) - t0 = (t0 + t1) % p - t1 = pow(t0, -1, p) - return a[0] * t1 % p, -(a[1] * t1) % p - num=scalar_mul_e2(3, mul_e2(x,x)) - sub=scalar_mul_e2(2,y) - sub_inv= inv_e2(sub) - value = mul_e2(num, sub_inv) - - value_split = [split(value[0]), split(value[1])] - for i in range(ids.N_LIMBS): - setattr(ids.slope_a0, 'd'+str(i), value_split[0][i]) - setattr(ids.slope_a1, 'd'+str(i), value_split[1][i]) - %} - - let x0_x1: UnreducedBigInt7 = bigint4_mul([pt.x.a0], [pt.x.a1]); - let x0_sqr: UnreducedBigInt7 = bigint4_sq([pt.x.a0]); - let x1_sqr: UnreducedBigInt7 = bigint4_sq([pt.x.a1]); - - let s0_y0: UnreducedBigInt7 = bigint4_mul(slope_a0, [pt.y.a0]); - let s1_y1: UnreducedBigInt7 = bigint4_mul(slope_a1, [pt.y.a1]); - - let s0_y1: UnreducedBigInt7 = bigint4_mul(slope_a0, [pt.y.a1]); - let s1_y0: UnreducedBigInt7 = bigint4_mul(slope_a1, [pt.y.a0]); - - // Verify real - verify_zero7( - UnreducedBigInt7( - d0=3 * (x0_sqr.d0 - x1_sqr.d0) - 2 * (s0_y0.d0 - s1_y1.d0), - d1=3 * (x0_sqr.d1 - x1_sqr.d1) - 2 * (s0_y0.d1 - s1_y1.d1), - d2=3 * (x0_sqr.d2 - x1_sqr.d2) - 2 * (s0_y0.d2 - s1_y1.d2), - d3=3 * (x0_sqr.d3 - x1_sqr.d3) - 2 * (s0_y0.d3 - s1_y1.d3), - d4=3 * (x0_sqr.d4 - x1_sqr.d4) - 2 * (s0_y0.d4 - s1_y1.d4), - d5=3 * (x0_sqr.d5 - x1_sqr.d5) - 2 * (s0_y0.d5 - s1_y1.d5), - d6=3 * (x0_sqr.d6 - x1_sqr.d6) - 2 * (s0_y0.d6 - s1_y1.d6), - ), - ); - // Verify imaginary - verify_zero7( - UnreducedBigInt7( - d0=2 * (3 * x0_x1.d0 - s0_y1.d0 - s1_y0.d0), - d1=2 * (3 * x0_x1.d1 - s0_y1.d1 - s1_y0.d1), - d2=2 * (3 * x0_x1.d2 - s0_y1.d2 - s1_y0.d2), - d3=2 * (3 * 
x0_x1.d3 - s0_y1.d3 - s1_y0.d3), - d4=2 * (3 * x0_x1.d4 - s0_y1.d4 - s1_y0.d4), - d5=2 * (3 * x0_x1.d5 - s0_y1.d5 - s1_y0.d5), - d6=2 * (3 * x0_x1.d6 - s0_y1.d6 - s1_y0.d6), - ), - ); - - local slope: E2 = E2(a0=&slope_a0, a1=&slope_a1); - return &slope; - } - // Returns the slope of the line connecting the two given points. - // The slope is used to compute pt0 + pt1. - // Assumption: pt0.x != pt1.x (mod field prime). - func compute_slope{range_check_ptr}(pt0: G2Point*, pt1: G2Point*) -> E2* { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - - local slope_a0: BigInt4; - local slope_a1: BigInt4; - %{ - assert 1 < ids.N_LIMBS <= 12 - assert ids.DEGREE == ids.N_LIMBS-1 - x0,y0,x1,y1,p=[0,0],[0,0],[0,0],[0,0],0 - - def split(x, degree=ids.DEGREE, base=ids.BASE): - coeffs = [] - for n in range(degree, 0, -1): - q, r = divmod(x, base ** n) - coeffs.append(q) - x = r - coeffs.append(x) - return coeffs[::-1] - - for i in range(ids.N_LIMBS): - x0[0]+=getattr(ids.pt0.x.a0,'d'+str(i)) * ids.BASE**i - x0[1]+=getattr(ids.pt0.x.a1,'d'+str(i)) * ids.BASE**i - y0[0]+=getattr(ids.pt0.y.a0,'d'+str(i)) * ids.BASE**i - y0[1]+=getattr(ids.pt0.y.a1,'d'+str(i)) * ids.BASE**i - x1[0]+=getattr(ids.pt1.x.a0,'d'+str(i)) * ids.BASE**i - x1[1]+=getattr(ids.pt1.x.a1,'d'+str(i)) * ids.BASE**i - y1[0]+=getattr(ids.pt1.y.a0,'d'+str(i)) * ids.BASE**i - y1[1]+=getattr(ids.pt1.y.a1,'d'+str(i)) * ids.BASE**i - p+=getattr(ids, 'P'+str(i)) * ids.BASE**i - - def mul_e2(x:(int,int), y:(int,int)): - a = (x[0] + x[1]) * (y[0] + y[1]) % p - b, c = x[0]*y[0] % p, x[1]*y[1] % p - return (b - c) % p, (a - b - c) % p - def sub_e2(x:(int,int), y:(int,int)): - return (x[0]-y[0]) % p, (x[1]-y[1]) % p - def inv_e2(a:(int, int)): - t0, t1 = (a[0] * a[0] % p, a[1] * a[1] % p) - t0 = (t0 + t1) % p - t1 = pow(t0, -1, p) - return a[0] * t1 % p, -(a[1] * t1) % p - - sub = sub_e2(x0,x1) - sub_inv = inv_e2(sub) - numerator = sub_e2(y0,y1) - value=mul_e2(numerator,sub_inv) - - value_split = [split(value[0]), split(value[1])] - for i in range(ids.N_LIMBS): - setattr(ids.slope_a0, 'd'+str(i), value_split[0][i]) - setattr(ids.slope_a1, 'd'+str(i), value_split[1][i]) - %} - - tempvar x_diff_real: BigInt4 = BigInt4( - d0=pt0.x.a0.d0 - pt1.x.a0.d0, - d1=pt0.x.a0.d1 - pt1.x.a0.d1, - d2=pt0.x.a0.d2 - pt1.x.a0.d2, - d3=pt0.x.a0.d3 - pt1.x.a0.d3, - ); - tempvar x_diff_imag: BigInt4 = BigInt4( - d0=pt0.x.a1.d0 - pt1.x.a1.d0, - d1=pt0.x.a1.d1 - pt1.x.a1.d1, - d2=pt0.x.a1.d2 - pt1.x.a1.d2, - d3=pt0.x.a1.d3 - pt1.x.a1.d3, - ); - - let x_diff_slope_imag_first_term: UnreducedBigInt7 = bigint4_mul(x_diff_real, slope_a1); - let x_diff_slope_imag_second_term: UnreducedBigInt7 = bigint4_mul(x_diff_imag, slope_a0); - - let x_diff_real_first_term: UnreducedBigInt7 = bigint4_mul(x_diff_real, slope_a0); - let x_diff_real_second_term: UnreducedBigInt7 = bigint4_mul(x_diff_imag, slope_a1); - - verify_zero7( - UnreducedBigInt7( - d0=x_diff_slope_imag_first_term.d0 + x_diff_slope_imag_second_term.d0 - - pt0.y.a1.d0 + pt1.y.a1.d0, - d1=x_diff_slope_imag_first_term.d1 + x_diff_slope_imag_second_term.d1 - - pt0.y.a1.d1 + pt1.y.a1.d1, - d2=x_diff_slope_imag_first_term.d2 + x_diff_slope_imag_second_term.d2 - - pt0.y.a1.d2 + pt1.y.a1.d2, - d3=x_diff_slope_imag_first_term.d3 + x_diff_slope_imag_second_term.d3 - - pt0.y.a1.d3 + pt1.y.a1.d3, - d4=x_diff_slope_imag_first_term.d4 + x_diff_slope_imag_second_term.d4, - d5=x_diff_slope_imag_first_term.d5 + x_diff_slope_imag_second_term.d5, - d6=x_diff_slope_imag_first_term.d6 + x_diff_slope_imag_second_term.d6, - ), - ); - - 
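The pair of verify_zero7 calls around this point encode the single F_p^2 relation slope * (x0 - x1) = (y0 - y1), split into its imaginary part (checked just above) and its real part (checked just below), using i^2 = -1. A small Python model of that split, with illustrative names (not the Cairo API):

def fp2_mul(a, b, p):
    # (a0 + a1*i) * (b0 + b1*i) with i^2 = -1
    return ((a[0] * b[0] - a[1] * b[1]) % p, (a[0] * b[1] + a[1] * b[0]) % p)

def fp2_inv(a, p):
    # 1 / (a0 + a1*i) = (a0 - a1*i) / (a0^2 + a1^2), as in the hint's inv_e2
    t = pow(a[0] * a[0] + a[1] * a[1], -1, p)
    return (a[0] * t % p, -a[1] * t % p)

def check_slope_relation(slope, x0, x1, y0, y1, p):
    dx = ((x0[0] - x1[0]) % p, (x0[1] - x1[1]) % p)
    lhs = fp2_mul(slope, dx, p)
    assert (lhs[0] - (y0[0] - y1[0])) % p == 0  # real-part constraint
    assert (lhs[1] - (y0[1] - y1[1])) % p == 0  # imaginary-part constraint

# With arbitrary coordinates over a toy prime, the slope computed as
# (y0 - y1) / (x0 - x1) in F_p^2 satisfies both limb constraints by construction.
p = 23
x0, x1, y0, y1 = (2, 5), (7, 1), (4, 9), (6, 3)
dx = ((x0[0] - x1[0]) % p, (x0[1] - x1[1]) % p)
dy = ((y0[0] - y1[0]) % p, (y0[1] - y1[1]) % p)
check_slope_relation(fp2_mul(dy, fp2_inv(dx, p), p), x0, x1, y0, y1, p)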
verify_zero7( - UnreducedBigInt7( - d0=x_diff_real_first_term.d0 - x_diff_real_second_term.d0 - pt0.y.a0.d0 + - pt1.y.a0.d0, - d1=x_diff_real_first_term.d1 - x_diff_real_second_term.d1 - pt0.y.a0.d1 + - pt1.y.a0.d1, - d2=x_diff_real_first_term.d2 - x_diff_real_second_term.d2 - pt0.y.a0.d2 + - pt1.y.a0.d2, - d3=x_diff_real_first_term.d3 - x_diff_real_second_term.d3 - pt0.y.a0.d3 + - pt1.y.a0.d3, - d4=x_diff_real_first_term.d4 - x_diff_real_second_term.d4, - d5=x_diff_real_first_term.d5 - x_diff_real_second_term.d5, - d6=x_diff_real_first_term.d6 - x_diff_real_second_term.d6, - ), - ); - local slope: E2 = E2(a0=&slope_a0, a1=&slope_a1); - - return &slope; - } - - // Given a point 'pt' on the elliptic curve, computes pt + pt. - func double{range_check_ptr}(pt: G2Point*) -> G2Point* { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - let is_zero = e2.is_zero(pt.x); - if (is_zero == 1) { - return pt; - } - - let slope: E2* = compute_doubling_slope(pt); - let (slope_sqr_a0: UnreducedBigInt7, slope_sqr_a1: UnreducedBigInt7) = square_e2_unreduced( - slope - ); - - local new_x_a0: BigInt4; - local new_x_a1: BigInt4; - local new_y_a0: BigInt4; - local new_y_a1: BigInt4; - %{ - assert 1 < ids.N_LIMBS <= 12 - assert ids.DEGREE == ids.N_LIMBS-1 - x0,x1,y0,y1,slope,p=0,0,0,0,[0,0],0 - - def split(x, degree=ids.DEGREE, base=ids.BASE): - coeffs = [] - for n in range(degree, 0, -1): - q, r = divmod(x, base ** n) - coeffs.append(q) - x = r - coeffs.append(x) - return coeffs[::-1] - def inv_e2(a0:int, a1:int): - t0, t1 = (a0 * a0 % p, a1 * a1 % p) - t0 = (t0 + t1) % p - t1 = pow(t0, -1, p) - return (a0 * t1 % p, -(a1 * t1) % p) - def mul_e2(x:(int,int), y:(int,int)): - a = (x[0] + x[1]) * (y[0] + y[1]) % p - b, c = x[0]*y[0] % p, x[1]*y[1] % p - return (b - c) % p, (a - b - c) % p - def sub_e2(x:(int,int), y:(int,int)): - return (x[0]-y[0]) % p, (x[1]-y[1]) % p - - for i in range(ids.N_LIMBS): - x0+=getattr(ids.pt.x.a0, 'd'+str(i)) * ids.BASE**i - x1+=getattr(ids.pt.x.a1, 'd'+str(i)) * ids.BASE**i - y0+=getattr(ids.pt.y.a0, 'd'+str(i)) * ids.BASE**i - y1+=getattr(ids.pt.y.a1, 'd'+str(i)) * ids.BASE**i - slope[0]+=getattr(ids.slope.a0, 'd'+str(i)) * ids.BASE**i - slope[1]+=getattr(ids.slope.a1, 'd'+str(i)) * ids.BASE**i - p+=getattr(ids, 'P'+str(i)) * ids.BASE**i - - new_x = sub_e2(mul_e2(slope, slope), mul_e2((2,0), (x0,x1))) - new_y = sub_e2(mul_e2(slope, sub_e2((x0,x1), new_x)), (y0,y1)) - new_xs, new_ys = [split(new_x[0]), split(new_x[1])], [split(new_y[0]), split(new_y[1])] - - for i in range(ids.N_LIMBS): - setattr(ids.new_x_a0, 'd'+str(i), new_xs[0][i]) - setattr(ids.new_x_a1, 'd'+str(i), new_xs[1][i]) - setattr(ids.new_y_a0, 'd'+str(i), new_ys[0][i]) - setattr(ids.new_y_a1, 'd'+str(i), new_ys[1][i]) - %} - - verify_zero7( - UnreducedBigInt7( - d0=slope_sqr_a0.d0 - new_x_a0.d0 - 2 * pt.x.a0.d0, - d1=slope_sqr_a0.d1 - new_x_a0.d1 - 2 * pt.x.a0.d1, - d2=slope_sqr_a0.d2 - new_x_a0.d2 - 2 * pt.x.a0.d2, - d3=slope_sqr_a0.d3 - new_x_a0.d3 - 2 * pt.x.a0.d3, - d4=slope_sqr_a0.d4, - d5=slope_sqr_a0.d5, - d6=slope_sqr_a0.d6, - ), - ); - - verify_zero7( - UnreducedBigInt7( - d0=slope_sqr_a1.d0 - new_x_a1.d0 - 2 * pt.x.a1.d0, - d1=slope_sqr_a1.d1 - new_x_a1.d1 - 2 * pt.x.a1.d1, - d2=slope_sqr_a1.d2 - new_x_a1.d2 - 2 * pt.x.a1.d2, - d3=slope_sqr_a1.d3 - new_x_a1.d3 - 2 * pt.x.a1.d3, - d4=slope_sqr_a1.d4, - d5=slope_sqr_a1.d5, - d6=slope_sqr_a1.d6, - ), - ); - local x_min_new_x_a0: BigInt4 = BigInt4( - d0=pt.x.a0.d0 - new_x_a0.d0, - d1=pt.x.a0.d1 - new_x_a0.d1, - d2=pt.x.a0.d2 - new_x_a0.d2, - d3=pt.x.a0.d3 - 
new_x_a0.d3, - ); - local x_min_new_x_a1: BigInt4 = BigInt4( - d0=pt.x.a1.d0 - new_x_a1.d0, - d1=pt.x.a1.d1 - new_x_a1.d1, - d2=pt.x.a1.d2 - new_x_a1.d2, - d3=pt.x.a1.d3 - new_x_a1.d3, - ); - local x_min_new_x: E2 = E2(&x_min_new_x_a0, &x_min_new_x_a1); - - // let x_diff_slope: E2* = e2.mul(&x_min_new_x, slope); - let (x_diff_slope_a0, x_diff_slope_a1) = mul_e2_unreduced(&x_min_new_x, slope); - - verify_zero7( - UnreducedBigInt7( - d0=x_diff_slope_a0.d0 - pt.y.a0.d0 - new_y_a0.d0, - d1=x_diff_slope_a0.d1 - pt.y.a0.d1 - new_y_a0.d1, - d2=x_diff_slope_a0.d2 - pt.y.a0.d2 - new_y_a0.d2, - d3=x_diff_slope_a0.d3 - pt.y.a0.d3 - new_y_a0.d3, - d4=x_diff_slope_a0.d4, - d5=x_diff_slope_a0.d5, - d6=x_diff_slope_a0.d6, - ), - ); - verify_zero7( - UnreducedBigInt7( - d0=x_diff_slope_a1.d0 - pt.y.a1.d0 - new_y_a1.d0, - d1=x_diff_slope_a1.d1 - pt.y.a1.d1 - new_y_a1.d1, - d2=x_diff_slope_a1.d2 - pt.y.a1.d2 - new_y_a1.d2, - d3=x_diff_slope_a1.d3 - pt.y.a1.d3 - new_y_a1.d3, - d4=x_diff_slope_a1.d4, - d5=x_diff_slope_a1.d5, - d6=x_diff_slope_a1.d6, - ), - ); - local new_x: E2 = E2(&new_x_a0, &new_x_a1); - local new_y: E2 = E2(&new_y_a0, &new_y_a1); - local res: G2Point = G2Point(&new_x, &new_y); - return &res; - } - - // DoubleStep doubles a point in affine coordinates, and evaluates the line in Miller loop - // https://eprint.iacr.org/2013/722.pdf (Section 4.3) - func double_step{range_check_ptr}(pt: G2Point*) -> (res: G2Point*, line_eval: E4*) { - alloc_locals; - // if (pt.x.d0 == 0) { - // if (pt.x.d1 == 0) { - // if (pt.x.d2 == 0) { - // let zero_6 = E6.zero(); - // return (pt, zero_6); - // } - // } - // } - - let (__fp__, _) = get_fp_and_pc(); - // assert_on_curve(pt); - // precomputations in p : - - // let xp_bar = fq_bigint4.neg(p.x); - // let yp_prime = fq_bigint4.inv(p.y); - // let xp_prime = fq_bigint4.mul(xp_bar, yp_prime); - // paper algo: - // let two_y = e2.double(pt.y); - // let A = e2.inv(two_y); - // let x_sq = e2.square(pt.x); - // tempvar three = new BigInt4(3, 0, 0); - // let B = e2.mul_by_element(three, x_sq); - // let C = e2.mul(A, B); // lamba : slope - let C = compute_doubling_slope(pt); - - let D = e2.double(pt.x); - let nx = e2.square(C); - let nx = e2.sub(nx, D); - let E = e2.mul(C, pt.x); - let E = e2.sub(E, pt.y); - let ny = e2.mul(C, nx); - let ny = e2.sub(E, ny); - - // assert_on_curve(res); - - // let F = e2.mul_by_element(xp_prime, C); - // let G = e2.mul_by_element(yp_prime, E); - tempvar res: G2Point* = new G2Point(nx, ny); - // tempvar line_eval: E4* = new E4(G, F); - tempvar line_eval: E4* = new E4(E, C); - - return (res, line_eval); - } - func add_step{range_check_ptr}(pt0: G2Point*, pt1: G2Point*) -> ( - res: G2Point*, line_eval: E4* - ) { - alloc_locals; - // if (pt0.x.d0 == 0) { - // if (pt0.x.d1 == 0) { - // if (pt0.x.d2 == 0) { - // let zero_6 = E6.zero(); - // return (pt1, zero_6); - // } - // } - // } - // if (pt1.x.d0 == 0) { - // if (pt1.x.d1 == 0) { - // if (pt1.x.d2 == 0) { - // let zero_6 = E6.zero(); - // return (pt0, zero_6); - // } - // } - // } - // assert_on_curve(pt0); - // assert_on_curve(pt1); - // precomputations in p : - // let xp_bar = fq_bigint4.neg(p.x); - // let yp_prime = fq_bigint4.inv(p.y); - // let xp_prime = fq_bigint4.mul(xp_bar, yp_prime); - // paper algo: - - let C = compute_slope(pt0, pt1); - let D = e2.add(pt0.x, pt1.x); - let nx = e2.square(C); - let nx = e2.sub(nx, D); - let E = e2.mul(C, pt0.x); - let E = e2.sub(E, pt0.y); - let ny = e2.mul(C, nx); - let ny = e2.sub(E, ny); - // assert_on_curve(res); - - // let F = 
e2.mul_by_element(xp_prime, C); - // let G = e2.mul_by_element(yp_prime, E); - // let one_e2 = e2.one(); - tempvar res: G2Point* = new G2Point(nx, ny); - tempvar line_eval: E4* = new E4(E, C); - return (res, line_eval); - } - - // Adds two points on the elliptic curve. - // Assumption: pt0.x != pt1.x (however, pt0 = pt1 = 0 is allowed). - // Note that this means that the function cannot be used if pt0 = pt1 - // (use ec_double() in this case) or pt0 = -pt1 (the result is 0 in this case). - func fast_ec_add{range_check_ptr}(pt0: G2Point*, pt1: G2Point*) -> G2Point* { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - - let is_zero_pt0_x = e2.is_zero(pt0.x); - let is_zero_pt1_x = e2.is_zero(pt1.x); - if (is_zero_pt0_x != 0) { - return pt1; - } - if (is_zero_pt1_x != 0) { - return pt0; - } - let slope: E2* = compute_slope(pt0, pt1); - let (slope_sqr_a0: UnreducedBigInt7, slope_sqr_a1: UnreducedBigInt7) = square_e2_unreduced( - slope - ); - local new_x_a0: BigInt4; - local new_x_a1: BigInt4; - local new_y_a0: BigInt4; - local new_y_a1: BigInt4; - %{ - from starkware.python.math_utils import div_mod - - assert 1 < ids.N_LIMBS <= 12 - assert ids.DEGREE == ids.N_LIMBS-1 - pt0x0,pt0x1,pt0y0,pt0y1,pt1x0,pt1x1,slope,p=0,0,0,0,0,0,[0,0],0 - - def split(x, degree=ids.DEGREE, base=ids.BASE): - coeffs = [] - for n in range(degree, 0, -1): - q, r = divmod(x, base ** n) - coeffs.append(q) - x = r - coeffs.append(x) - return coeffs[::-1] - def inv_e2(a0:int, a1:int): - t0, t1 = (a0 * a0 % p, a1 * a1 % p) - t0 = (t0 + t1) % p - t1 = pow(t0, -1, p) - return (a0 * t1 % p, -(a1 * t1) % p) - def mul_e2(x:(int,int), y:(int,int)): - a = (x[0] + x[1]) * (y[0] + y[1]) % p - b, c = x[0]*y[0] % p, x[1]*y[1] % p - return (b - c) % p, (a - b - c) % p - def sub_e2(x:(int,int), y:(int,int)): - return (x[0]-y[0]) % p, (x[1]-y[1]) % p - - for i in range(ids.N_LIMBS): - pt0x0+=getattr(ids.pt0.x.a0, 'd'+str(i)) * ids.BASE**i - pt0x1+=getattr(ids.pt0.x.a1, 'd'+str(i)) * ids.BASE**i - pt0y0+=getattr(ids.pt0.y.a0, 'd'+str(i)) * ids.BASE**i - pt0y1+=getattr(ids.pt0.y.a1, 'd'+str(i)) * ids.BASE**i - pt1x0+=getattr(ids.pt1.x.a0, 'd'+str(i)) * ids.BASE**i - pt1x1+=getattr(ids.pt1.x.a1, 'd'+str(i)) * ids.BASE**i - slope[0]+=getattr(ids.slope.a0, 'd'+str(i)) * ids.BASE**i - slope[1]+=getattr(ids.slope.a1, 'd'+str(i)) * ids.BASE**i - p+=getattr(ids, 'P'+str(i)) * ids.BASE**i - - new_x = sub_e2(sub_e2(mul_e2(slope, slope), (pt0x0,pt0x1)), (pt1x0,pt1x1)) - new_y = sub_e2(mul_e2(slope, sub_e2((pt0x0,pt0x1), new_x)), (pt0y0,pt0y1)) - - new_xs, new_ys = [split(new_x[0]), split(new_x[1])], [split(new_y[0]), split(new_y[1])] - - for i in range(ids.N_LIMBS): - setattr(ids.new_x_a0, 'd'+str(i), new_xs[0][i]) - setattr(ids.new_x_a1, 'd'+str(i), new_xs[1][i]) - setattr(ids.new_y_a0, 'd'+str(i), new_ys[0][i]) - setattr(ids.new_y_a1, 'd'+str(i), new_ys[1][i]) - %} - - verify_zero7( - UnreducedBigInt7( - d0=slope_sqr_a0.d0 - new_x_a0.d0 - pt0.x.a0.d0 - pt1.x.a0.d0, - d1=slope_sqr_a0.d1 - new_x_a0.d1 - pt0.x.a0.d1 - pt1.x.a0.d1, - d2=slope_sqr_a0.d2 - new_x_a0.d2 - pt0.x.a0.d2 - pt1.x.a0.d2, - d3=slope_sqr_a0.d3 - new_x_a0.d3 - pt0.x.a0.d3 - pt1.x.a0.d3, - d4=slope_sqr_a0.d4, - d5=slope_sqr_a0.d5, - d6=slope_sqr_a0.d6, - ), - ); - verify_zero7( - UnreducedBigInt7( - d0=slope_sqr_a1.d0 - new_x_a1.d0 - pt0.x.a1.d0 - pt1.x.a1.d0, - d1=slope_sqr_a1.d1 - new_x_a1.d1 - pt0.x.a1.d1 - pt1.x.a1.d1, - d2=slope_sqr_a1.d2 - new_x_a1.d2 - pt0.x.a1.d2 - pt1.x.a1.d2, - d3=slope_sqr_a1.d3 - new_x_a1.d3 - pt0.x.a1.d3 - pt1.x.a1.d3, - d4=slope_sqr_a1.d4, - 
d5=slope_sqr_a1.d5, - d6=slope_sqr_a1.d6, - ), - ); - local x_min_new_x_a0: BigInt4 = BigInt4( - d0=pt0.x.a0.d0 - new_x_a0.d0, - d1=pt0.x.a0.d1 - new_x_a0.d1, - d2=pt0.x.a0.d2 - new_x_a0.d2, - d3=pt0.x.a0.d3 - new_x_a0.d3, - ); - local x_min_new_x_a1: BigInt4 = BigInt4( - d0=pt0.x.a1.d0 - new_x_a1.d0, - d1=pt0.x.a1.d1 - new_x_a1.d1, - d2=pt0.x.a1.d2 - new_x_a1.d2, - d3=pt0.x.a1.d3 - new_x_a1.d3, - ); - local x_min_new_x: E2 = E2(&x_min_new_x_a0, &x_min_new_x_a1); - - let ( - x_diff_slope_a0: UnreducedBigInt7, x_diff_slope_a1: UnreducedBigInt7 - ) = mul_e2_unreduced(&x_min_new_x, slope); - - verify_zero7( - UnreducedBigInt7( - d0=x_diff_slope_a0.d0 - pt0.y.a0.d0 - new_y_a0.d0, - d1=x_diff_slope_a0.d1 - pt0.y.a0.d1 - new_y_a0.d1, - d2=x_diff_slope_a0.d2 - pt0.y.a0.d2 - new_y_a0.d2, - d3=x_diff_slope_a0.d3 - pt0.y.a0.d3 - new_y_a0.d3, - d4=x_diff_slope_a0.d4, - d5=x_diff_slope_a0.d5, - d6=x_diff_slope_a0.d6, - ), - ); - verify_zero7( - UnreducedBigInt7( - d0=x_diff_slope_a1.d0 - pt0.y.a1.d0 - new_y_a1.d0, - d1=x_diff_slope_a1.d1 - pt0.y.a1.d1 - new_y_a1.d1, - d2=x_diff_slope_a1.d2 - pt0.y.a1.d2 - new_y_a1.d2, - d3=x_diff_slope_a1.d3 - pt0.y.a1.d3 - new_y_a1.d3, - d4=x_diff_slope_a1.d4, - d5=x_diff_slope_a1.d5, - d6=x_diff_slope_a1.d6, - ), - ); - local new_x: E2 = E2(&new_x_a0, &new_x_a1); - local new_y: E2 = E2(&new_y_a0, &new_y_a1); - local res: G2Point = G2Point(&new_x, &new_y); - return &res; - } - // Same as fast_ec_add, except that the cases pt0 = ±pt1 are supported. - func add{range_check_ptr}(pt0: G2Point*, pt1: G2Point*) -> G2Point* { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - local x_diff_a0: BigInt4 = BigInt4( - d0=pt0.x.a0.d0 - pt1.x.a0.d0, - d1=pt0.x.a0.d1 - pt1.x.a0.d1, - d2=pt0.x.a0.d2 - pt1.x.a0.d2, - d3=pt0.x.a0.d3 - pt1.x.a0.d3, - ); - local x_diff_a1: BigInt4 = BigInt4( - d0=pt0.x.a1.d0 - pt1.x.a1.d0, - d1=pt0.x.a1.d1 - pt1.x.a1.d1, - d2=pt0.x.a1.d2 - pt1.x.a1.d2, - d3=pt0.x.a1.d3 - pt1.x.a1.d3, - ); - local x_diff: E2 = E2(&x_diff_a0, &x_diff_a1); - - let same_x: felt = e2.is_zero(&x_diff); - if (same_x == 0) { - // pt0.x != pt1.x so we can use fast_ec_add. - return fast_ec_add(pt0, pt1); - } - - // We have pt0.x = pt1.x. This implies pt0.y = ±pt1.y. - // Check whether pt0.y = -pt1.y. - local y_sum_a0: BigInt4 = BigInt4( - d0=pt0.y.a0.d0 + pt1.y.a0.d0, - d1=pt0.y.a0.d1 + pt1.y.a0.d1, - d2=pt0.y.a0.d2 + pt1.y.a0.d2, - d3=pt0.y.a0.d3 + pt1.y.a0.d3, - ); - local y_sum_a1: BigInt4 = BigInt4( - d0=pt0.y.a1.d0 + pt1.y.a1.d0, - d1=pt0.y.a1.d1 + pt1.y.a1.d1, - d2=pt0.y.a1.d2 + pt1.y.a1.d2, - d3=pt0.y.a1.d3 + pt1.y.a1.d3, - ); - - local y_sum: E2 = E2(&y_sum_a0, &y_sum_a1); - - let opposite_y: felt = e2.is_zero(&y_sum); - if (opposite_y != 0) { - // pt0.y = -pt1.y. - // Note that the case pt0 = pt1 = 0 falls into this branch as well. - let zero2 = e2.zero(); - local ZERO_POINT: G2Point = G2Point(zero2, zero2); - return &ZERO_POINT; - } else { - // pt0.y = pt1.y. - return double(pt0); - } - } - - // Given 0 <= m < 250, a scalar and a point on the elliptic curve, pt, - // verifies that 0 <= scalar < 2**m and returns (2**m * pt, scalar * pt). 
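The comment above describes the contract of the ec_mul_inner that follows: a right-to-left double-and-add that returns both 2**m * pt and scalar * pt, and (via the assert in its base case) range-checks the scalar against 2**m. A plain-Python model of the recursion implemented below, with dbl and add standing in for the curve operations (illustrative names, not the Cairo functions):

def ec_mul_inner_model(pt, scalar, m, dbl, add, zero):
    if m == 0:
        assert scalar == 0            # this is what enforces scalar < 2**m
        return pt, zero
    double_pt = dbl(pt)
    if scalar % 2 == 0:
        return ec_mul_inner_model(double_pt, scalar // 2, m - 1, dbl, add, zero)
    pow2, inner = ec_mul_inner_model(double_pt, (scalar - 1) // 2, m - 1, dbl, add, zero)
    # inner == (scalar - 1) * pt; for a valid scalar it is never equal to +/- pt,
    # so the "fast" addition (distinct x-coordinates) is safe at this step.
    return pow2, add(pt, inner)

# Integer stand-ins make the invariant easy to check: with dbl(q) = 2*q and
# add(a, b) = a + b, the result is exactly (2**m * pt, scalar * pt).
assert ec_mul_inner_model(3, 5, 4, lambda q: 2 * q, lambda a, b: a + b, 0) == (48, 15)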
- func ec_mul_inner{range_check_ptr}(pt: G2Point*, scalar: felt, m: felt) -> ( - pow2: G2Point*, res: G2Point* - ) { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - // %{ print(f"scalar = {ids.scalar}, m=={ids.m}") %} - if (m == 0) { - assert scalar = 0; - let zero2 = e2.zero(); - local ZERO_POINT: G2Point = G2Point(zero2, zero2); - return (pow2=pt, res=&ZERO_POINT); - } - - let double_pt: G2Point* = double(pt); - // %{ print_G2(ids.double_pt) %} - %{ memory[ap] = (ids.scalar % PRIME) % 2 %} - jmp odd if [ap] != 0, ap++; - return ec_mul_inner(pt=double_pt, scalar=scalar / 2, m=m - 1); - - odd: - let (local inner_pow2: G2Point*, inner_res: G2Point*) = ec_mul_inner( - pt=double_pt, scalar=(scalar - 1) / 2, m=m - 1 - ); - // Here inner_res = (scalar - 1) / 2 * double_pt = (scalar - 1) * pt. - // Assume pt != 0 and that inner_res = ±pt. We obtain (scalar - 1) * pt = ±pt => - // scalar - 1 = ±1 (mod N) => scalar = 0 or 2. - // In both cases (scalar - 1) / 2 cannot be in the range [0, 2**(m-1)), so we get a - // contradiction. - let res: G2Point* = fast_ec_add(pt0=pt, pt1=inner_res); - return (pow2=inner_pow2, res=res); - } - - func scalar_mul{range_check_ptr}(pt: G2Point*, scalar: BigInt4*) -> (res: G2Point*) { - alloc_locals; - let (pow2_0: G2Point*, local res0: G2Point*) = ec_mul_inner(pt, scalar.d0, 96); - %{ - print('first limb') - print_G2(ids.pow2_0) - print_G2(ids.res0) - %} - let (pow2_1: G2Point*, local res1: G2Point*) = ec_mul_inner(pow2_0, scalar.d1, 96); - %{ - print('second limb') - print_G2(ids.pow2_1) - print_G2(ids.res1) - %} - - let (_, local res2: G2Point*) = ec_mul_inner(pow2_1, scalar.d2, 63); - %{ - print('third limb') - print_G2(ids.res2) - %} - let res: G2Point* = add(res0, res1); - let res: G2Point* = add(res, res2); - return (res,); - } -} diff --git a/archive_tmp/bls12_381/pairing.cairo b/archive_tmp/bls12_381/pairing.cairo deleted file mode 100644 index b6531029..00000000 --- a/archive_tmp/bls12_381/pairing.cairo +++ /dev/null @@ -1,276 +0,0 @@ -from starkware.cairo.common.registers import get_label_location -from src.bls12_381.g1 import G1Point -from src.bls12_381.g2 import G2Point, g2, E4 -from src.bls12_381.towers.e12 import E12, e12 -from src.bls12_381.towers.e2 import E2, e2 -from src.bls12_381.towers.e6 import E6, e6 -from src.bls12_381.nG2_lines import get_nQ_lines -from src.bls12_381.fq import BigInt4, fq_bigint4 - -const ate_loop_count = 15132376222941642752; -const log_ate_loop_count = 63; - -func pair{range_check_ptr}(P: G1Point*, Q: G2Point*) -> E12* { - alloc_locals; - let f = miller_loop(P, Q); - let f = final_exponentiation(f); - return f; -} - -func pair_fixed_G2{range_check_ptr}(P: G1Point*) -> E12* { - alloc_locals; - let f = miller_loop_fixed_G2(P); - let f = final_exponentiation(f); - return f; -} -func miller_loop{range_check_ptr}(P: G1Point*, Q: G2Point*) -> E12* { - alloc_locals; - // todo : Assert P, Q not 0 (point at infinity) - %{ - import numpy as np - def print_G2(id, index, bit): - x0 = id.x.a0.d0 + id.x.a0.d1 * 2**96 + id.x.a0.d2 * 2**192 + id.x.a0.d3 * 2**288 - x1 = id.x.a1.d0 + id.x.a1.d1 * 2**96 + id.x.a1.d2 * 2**192 + id.x.a1.d3 * 2**288 - y0 = id.y.a0.d0 + id.y.a0.d1 * 2**96 + id.y.a0.d2 * 2**192 + id.y.a0.d3 * 2**288 - y1 = id.y.a1.d0 + id.y.a1.d1 * 2**96 + id.y.a1.d2 * 2**192 + id.y.a1.d3 * 2**288 - print(f"{index} || {bit} X={np.base_repr(x0,36).lower()} + {np.base_repr(x1,36).lower()}*u ") - # print(f"Y={np.base_repr(y0,36).lower()} + {np.base_repr(y1,36).lower()}*u") - %} - - local Q_original: G2Point* = Q; - - let result 
= e12.one(); - let xp_bar = fq_bigint4.neg(P.x); - let yp_prime = fq_bigint4.inv(P.y); - let xp_prime = fq_bigint4.mul(xp_bar, yp_prime); - let (Q: G2Point*, l1: E4*) = g2.double_step(Q); - let l1r0 = e2.mul_by_element(yp_prime, l1.r0); - let l1r1 = e2.mul_by_element(xp_prime, l1.r1); - - let (Q: G2Point*, l2: E4*) = g2.add_step(Q, Q_original); - let l2r0 = e2.mul_by_element(yp_prime, l2.r0); - let l2r1 = e2.mul_by_element(xp_prime, l2.r1); - - let lines = e12.mul_014_by_014(l1r0, l1r1, l2r0, l2r1); - let result = e12.mul(result, lines); - - with Q_original, xp_prime, yp_prime { - let (local final_Q: G2Point*, local result: E12*) = miller_loop_inner( - Q=Q, result=result, index=61 - ); - } - - return result; -} -func miller_loop_inner{ - range_check_ptr, Q_original: G2Point*, xp_prime: BigInt4*, yp_prime: BigInt4* -}(Q: G2Point*, result: E12*, index: felt) -> (point: G2Point*, res: E12*) { - alloc_locals; - if (index == -1) { - // negative x₀ - let result = e12.conjugate(result); - return (Q, result); - } - - let result = e12.square(result); - let (Q: G2Point*, l1: E4*) = g2.double_step(Q); - let l1r0 = e2.mul_by_element(yp_prime, l1.r0); - let l1r1 = e2.mul_by_element(xp_prime, l1.r1); - let (local bit: felt) = get_loop_digit(index); - if (bit == 0) { - let result = e12.mul_by_014(result, l1r0, l1r1); - %{ print_G2(ids.Q, ids.index, ids.bit) %} - return miller_loop_inner(Q, result, index - 1); - } else { - let (Q: G2Point*, l2: E4*) = g2.add_step(Q, Q_original); - let l2r0 = e2.mul_by_element(yp_prime, l2.r0); - let l2r1 = e2.mul_by_element(xp_prime, l2.r1); - let lines = e12.mul_014_by_014(l1r0, l1r1, l2r0, l2r1); - let result = e12.mul(result, lines); - %{ print_G2(ids.Q, ids.index, ids.bit) %} - - return miller_loop_inner(Q, result, index - 1); - } -} -func miller_loop_fixed_G2{range_check_ptr}(P: G1Point*) -> E12* { - alloc_locals; - // todo : Assert P, Q not 0 (point at infinity) - %{ - import numpy as np - def print_G2(id, index, bit): - x0 = id.x.a0.d0 + id.x.a0.d1 * 2**96 + id.x.a0.d2 * 2**192 + id.x.a0.d3 * 2**288 - x1 = id.x.a1.d0 + id.x.a1.d1 * 2**96 + id.x.a1.d2 * 2**192 + id.x.a1.d3 * 2**288 - y0 = id.y.a0.d0 + id.y.a0.d1 * 2**96 + id.y.a0.d2 * 2**192 + id.y.a0.d3 * 2**288 - y1 = id.y.a1.d0 + id.y.a1.d1 * 2**96 + id.y.a1.d2 * 2**192 + id.y.a1.d3 * 2**288 - print(f"{index} || {bit} X={np.base_repr(x0,36).lower()} + {np.base_repr(x1,36).lower()}*u ") - # print(f"Y={np.base_repr(y0,36).lower()} + {np.base_repr(y1,36).lower()}*u") - %} - - // local Q_original: G2Point* = Q; - - let result = e12.one(); - let xp_bar = fq_bigint4.neg(P.x); - let yp_prime = fq_bigint4.inv(P.y); - let xp_prime = fq_bigint4.mul(xp_bar, yp_prime); - let (Q: G2Point*, local l1: E4*) = get_nQ_lines(0); - let l1r0 = e2.mul_by_element(yp_prime, l1.r0); - let l1r1 = e2.mul_by_element(xp_prime, l1.r1); - - let (Q: G2Point*, local l2: E4*) = get_nQ_lines(1); - let l2r0 = e2.mul_by_element(yp_prime, l2.r0); - let l2r1 = e2.mul_by_element(xp_prime, l2.r1); - - let lines = e12.mul_014_by_014(l1r0, l1r1, l2r0, l2r1); - let result = e12.mul(result, lines); - let n = 2; - with n, xp_prime, yp_prime { - let (local final_Q: G2Point*, local result: E12*) = miller_loop_fixed_G2_inner( - Q=Q, result=result, index=61 - ); - } - - return result; -} -func miller_loop_fixed_G2_inner{range_check_ptr, n: felt, xp_prime: BigInt4*, yp_prime: BigInt4*}( - Q: G2Point*, result: E12*, index: felt -) -> (point: G2Point*, res: E12*) { - alloc_locals; - if (index == -1) { - let result = e12.conjugate(result); - return (Q, result); - } 
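Both Miller-loop variants share the same skeleton: square the accumulator, consume one doubling line per step, consume an extra addition line when the current bit of the loop count is set, and conjugate at the end because x0 is negative. A structural Python model of that bookkeeping, with the group and F_p^12 arithmetic abstracted away (illustrative names; the bit array matches get_loop_digit and ate_loop_count = 15132376222941642752):

ATE_LOOP_COUNT = 15132376222941642752                    # |x0| for BLS12-381
BITS = [(ATE_LOOP_COUNT >> i) & 1 for i in range(64)]    # little-endian, as in get_loop_digit

def count_line_evaluations(start_index=61):
    # The top bit (63) seeds the accumulator point; the first iteration (bit 62,
    # which is set) is unrolled before the recursion, costing one doubling line
    # and one addition line up front.
    doublings, additions = 1, 1
    for index in range(start_index, -1, -1):
        doublings += 1                 # one double_step / stored line per index
        if BITS[index] == 1:
            additions += 1             # extra add_step / stored line on set bits
    return doublings, additions

# 63 doubling lines in total, plus one addition line per set bit below the top bit.
assert count_line_evaluations() == (63, sum(BITS[:63]))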
- - let result = e12.square(result); - let (Q: G2Point*, l1: E4*) = get_nQ_lines(n); - let l1r0 = e2.mul_by_element(yp_prime, l1.r0); - let l1r1 = e2.mul_by_element(xp_prime, l1.r1); - let n = n + 1; - let (local bit: felt) = get_loop_digit(index); - if (bit == 0) { - let result = e12.mul_by_014(result, l1r0, l1r1); - // %{ print_G2(ids.Q, ids.index, ids.bit) %} - return miller_loop_fixed_G2_inner(Q, result, index - 1); - } else { - let (Q: G2Point*, l2: E4*) = get_nQ_lines(n); - let l2r0 = e2.mul_by_element(yp_prime, l2.r0); - let l2r1 = e2.mul_by_element(xp_prime, l2.r1); - let n = n + 1; - let lines = e12.mul_014_by_014(l1r0, l1r1, l2r0, l2r1); - let result = e12.mul(result, lines); - return miller_loop_fixed_G2_inner(Q, result, index - 1); - } -} - -func final_exponentiation{range_check_ptr}(z: E12*) -> E12* { - alloc_locals; - - // Easy part - // (p⁶-1)(p²+1) - let result = z; - let t0 = e12.conjugate(z); - let result = e12.inverse(result); - let t0 = e12.mul(t0, result); - let result = e12.frobenius_square(t0); - let result = e12.mul(result, t0); - let is_one = e12.is_one(result); - if (is_one != 0) { - %{ print(f"Easy part of the final exponentiation is 1, avoid computing the hard part.") %} - let r = e12.one(); - return r; - } - // Hard part (up to permutation) - // Daiki Hayashida, Kenichiro Hayasaka and Tadanori Teruya - // https://eprint.iacr.org/2020/875.pdf - let t0 = e12.cyclotomic_square(result); - let t1 = e12.expt_half(t0); - let t2 = e12.conjugate(result); - let t1 = e12.mul(t1, t2); - let t2 = e12.expt(t1); - let t1 = e12.conjugate(t1); - let t1 = e12.mul(t1, t2); - let t2 = e12.expt(t1); - let t1 = e12.frobenius(t1); - let t1 = e12.mul(t1, t2); - let result = e12.mul(result, t0); - let t0 = e12.expt(t1); - let t2 = e12.expt(t0); - let t0 = e12.frobenius_square(t1); - let t1 = e12.conjugate(t1); - let t1 = e12.mul(t1, t2); - let t1 = e12.mul(t1, t0); - let result = e12.mul(result, t1); - return result; -} - -// Binary decomposition of -x₀ = 29793968203157093288 little endian -func get_loop_digit(index: felt) -> (b: felt) { - let (data) = get_label_location(bits); - let bit_array = cast(data, felt*); - return (bit_array[index],); - - bits: - dw 0; - dw 0; - dw 0; - dw 0; - dw 0; - dw 0; - dw 0; - dw 0; - dw 0; - dw 0; - dw 0; - dw 0; - dw 0; - dw 0; - dw 0; - dw 0; - dw 1; - dw 0; - dw 0; - dw 0; - dw 0; - dw 0; - dw 0; - dw 0; - dw 0; - dw 0; - dw 0; - dw 0; - dw 0; - dw 0; - dw 0; - dw 0; - dw 0; - dw 0; - dw 0; - dw 0; - dw 0; - dw 0; - dw 0; - dw 0; - dw 0; - dw 0; - dw 0; - dw 0; - dw 0; - dw 0; - dw 0; - dw 0; - dw 1; - dw 0; - dw 0; - dw 0; - dw 0; - dw 0; - dw 0; - dw 0; - dw 0; - dw 1; - dw 0; - dw 0; - dw 1; - dw 0; - dw 1; - dw 1; -} diff --git a/archive_tmp/bls12_381/towers/e12.cairo b/archive_tmp/bls12_381/towers/e12.cairo deleted file mode 100644 index be38c149..00000000 --- a/archive_tmp/bls12_381/towers/e12.cairo +++ /dev/null @@ -1,550 +0,0 @@ -from src.bls12_381.towers.e6 import e6, E6 -from src.bls12_381.towers.e2 import e2, E2 -from src.bls12_381.fq import fq_bigint4, BigInt4, fq_eq_zero -from starkware.cairo.common.registers import get_fp_and_pc -from src.bls12_381.curve import ( - N_LIMBS, - DEGREE, - BASE, - P0, - P1, - P2, - P3, - NON_RESIDUE_E2_a0, - NON_RESIDUE_E2_a1, -) - -struct E12 { - c0: E6*, - c1: E6*, -} - -namespace e12 { - // Returns the conjugate of x in E12 - func conjugate{range_check_ptr}(x: E12*) -> E12* { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - - let c1 = e6.neg(x.c1); - local res: E12 = E12(x.c0, c1); - return &res; 
- } // OK - func is_one{}(x: E12*) -> felt { - let c1_is_zero = e6.is_zero(x.c1); - if (c1_is_zero == 0) { - return 0; - } - let c0_is_one = e6.is_one(x.c0); - return c0_is_one; - } - // Adds two E12 elements - func add{range_check_ptr}(x: E12*, y: E12*) -> E12* { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - - let c0 = e6.add(x.c0, y.c0); - let c1 = e6.add(x.c1, y.c1); - local res: E12 = E12(c0, c1); - return &res; - } // OK - - // Subtracts two E12 elements - func sub{range_check_ptr}(x: E12*, y: E12*) -> E12* { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - - let c0 = e6.sub(x.c0, y.c0); - let c1 = e6.sub(x.c1, y.c1); - local res: E12 = E12(c0, c1); - return &res; - } // OK - - // Returns 2*x in E12 - func double{range_check_ptr}(x: E12*) -> E12* { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - - let c0 = e6.double(x.c0); - let c1 = e6.double(x.c1); - local res: E12 = E12(c0, c1); - return &res; - } // OK - - func mul{range_check_ptr}(x: E12*, y: E12*) -> E12* { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - - let a = e6.add(x.c0, x.c1); - let b = e6.add(y.c0, y.c1); - let a = e6.mul(a, b); - let b = e6.mul(x.c0, y.c0); - let c = e6.mul(x.c1, y.c1); - let zC1 = e6.sub(a, b); - let zC1 = e6.sub(zC1, c); - let zC0 = e6.mul_by_non_residue(c); - let zC0 = e6.add(zC0, b); - local res: E12 = E12(zC0, zC1); - return &res; - } // OK - - // Multiplication by sparse element (c0, c1, 0, 0, c4) - func mul_by_014{range_check_ptr}(z: E12*, c0: E2*, c1: E2*) -> E12* { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - - let a = e6.mul_by_01(z.c0, c0, c1); - let b = e6.mul_by_1(z.c1); - let d = e2.add(c1, e2.one()); - let zC1 = e6.add(z.c1, z.c0); - let zC1 = e6.mul_by_01(zC1, c0, d); - let zC1 = e6.sub(zC1, a); - let zC1 = e6.sub(zC1, b); - let zC0 = e6.mul_by_non_residue(b); - let zC0 = e6.add(zC0, a); - local res: E12 = E12(zC0, zC1); - return &res; - } // OK - - // Mul014By014 multiplication of sparse element (c0,c1,0,0,c4,0) by sparse element (d0,d1,0,0,d4,0) - func mul_014_by_014{range_check_ptr}(c0: E2*, c1: E2*, d0: E2*, d1: E2*) -> E12* { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - - let x0 = e2.mul(c0, d0); - let x1 = e2.mul(c1, d1); - let x4 = e2.one(); - let tmp = e2.add(c0, x4); - let x04 = e2.add(d0, x4); - let x04 = e2.mul(x04, tmp); - let x04 = e2.sub(x04, x0); - let x04 = e2.sub(x04, x4); - let tmp = e2.add(c0, c1); - let x01 = e2.add(d0, d1); - let x01 = e2.mul(x01, tmp); - let x01 = e2.sub(x01, x0); - let x01 = e2.sub(x01, x1); - // todo : optimize aand add two - let tmp = e2.add(c1, x4); - let x14 = e2.add(d1, x4); - let x14 = e2.mul(x14, tmp); - let x14 = e2.sub(x14, x1); - let x14 = e2.sub(x14, x4); - let c0B0 = e2.mul_by_non_residue(x4); - let c0B0 = e2.add(c0B0, x0); - let c0B1 = x01; - let c0B2 = x1; - let c1B0 = e2.zero(); - let c1B1 = x04; - let c1B2 = x14; - tempvar res = new E12(new E6(c0B0, c0B1, c0B2), new E6(c1B0, c1B1, c1B2)); - return res; - } // OK - - func square{range_check_ptr}(x: E12*) -> E12* { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - - let c0 = e6.sub(x.c0, x.c1); - let c3 = e6.mul_by_non_residue(x.c1); - let c3 = e6.neg(c3); - let c3 = e6.add(x.c0, c3); - let c2 = e6.mul(x.c0, x.c1); - let c0 = e6.mul(c0, c3); - let c0 = e6.add(c0, c2); - let c1 = e6.double(c2); - let c2 = e6.mul_by_non_residue(c2); - let c0 = e6.add(c0, c2); - tempvar res = new E12(c0, c1); - return res; - } // OK - - func inverse{range_check_ptr}(x: E12*) -> E12* { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - local inv0: 
BigInt4; - local inv1: BigInt4; - local inv2: BigInt4; - local inv3: BigInt4; - local inv4: BigInt4; - local inv5: BigInt4; - local inv6: BigInt4; - local inv7: BigInt4; - local inv8: BigInt4; - local inv9: BigInt4; - local inv10: BigInt4; - local inv11: BigInt4; - tempvar inv = new E12( - new E6(new E2(&inv0, &inv1), new E2(&inv2, &inv3), new E2(&inv4, &inv5)), - new E6(new E2(&inv6, &inv7), new E2(&inv8, &inv9), new E2(&inv10, &inv11)), - ); - %{ - from starkware.cairo.common.math_utils import as_int - assert 1 < ids.N_LIMBS <= 12 - p, c0, c1=0, 6*[0], 6*[0] - c0_refs =[ids.x.c0.b0.a0, ids.x.c0.b0.a1, ids.x.c0.b1.a0, ids.x.c0.b1.a1, ids.x.c0.b2.a0, ids.x.c0.b2.a1] - c1_refs =[ids.x.c1.b0.a0, ids.x.c1.b0.a1, ids.x.c1.b1.a0, ids.x.c1.b1.a1, ids.x.c1.b2.a0, ids.x.c1.b2.a1] - - # E2 Tower: - def mul_e2(x:(int,int), y:(int,int)): - a = (x[0] + x[1]) * (y[0] + y[1]) % p - b, c = x[0]*y[0] % p, x[1]*y[1] % p - return (b - c) % p, (a - b - c) % p - def square_e2(x:(int,int)): - return mul_e2(x,x) - def double_e2(x:(int,int)): - return 2*x[0]%p, 2*x[1]%p - def sub_e2(x:(int,int), y:(int,int)): - return (x[0]-y[0]) % p, (x[1]-y[1]) % p - def neg_e2(x:(int,int)): - return -x[0] % p, -x[1] % p - def mul_by_non_residue_e2(x:(int, int)): - return mul_e2(x, (ids.NON_RESIDUE_E2_a0, ids.NON_RESIDUE_E2_a1)) - def add_e2(x:(int,int), y:(int,int)): - return (x[0]+y[0]) % p, (x[1]+y[1]) % p - def inv_e2(a:(int, int)): - t0, t1 = (a[0] * a[0] % p, a[1] * a[1] % p) - t0 = (t0 + t1) % p - t1 = pow(t0, -1, p) - return a[0] * t1 % p, -(a[1] * t1) % p - - # E6 Tower: - def mul_by_non_residue_e6(x:((int,int),(int,int),(int,int))): - return mul_by_non_residue_e2(x[2]), x[0], x[1] - def sub_e6(x:((int,int), (int,int), (int,int)),y:((int,int), (int,int), (int,int))): - return (sub_e2(x[0], y[0]), sub_e2(x[1], y[1]), sub_e2(x[2], y[2])) - def neg_e6(x:((int,int), (int,int), (int,int))): - return neg_e2(x[0]), neg_e2(x[1]), neg_e2(x[2]) - def inv_e6(x:((int,int),(int,int),(int,int))): - t0, t1, t2 = square_e2(x[0]), square_e2(x[1]), square_e2(x[2]) - t3, t4, t5 = mul_e2(x[0], x[1]), mul_e2(x[0], x[2]), mul_e2(x[1], x[2]) - c0 = add_e2(neg_e2(mul_by_non_residue_e2(t5)), t0) - c1 = sub_e2(mul_by_non_residue_e2(t2), t3) - c2 = sub_e2(t1, t4) - t6 = mul_e2(x[0], c0) - d1 = mul_e2(x[2], c1) - d2 = mul_e2(x[1], c2) - d1 = mul_by_non_residue_e2(add_e2(d1, d2)) - t6 = add_e2(t6, d1) - t6 = inv_e2(t6) - return mul_e2(c0, t6), mul_e2(c1, t6), mul_e2(c2, t6) - - - def mul_e6(x:((int,int),(int,int),(int,int)), y:((int,int),(int,int),(int,int))): - assert len(x) == 3 and len(y) == 3 and len(x[0]) == 2 and len(x[1]) == 2 and len(x[2]) == 2 and len(y[0]) == 2 and len(y[1]) == 2 and len(y[2]) == 2 - t0, t1, t2 = mul_e2(x[0], y[0]), mul_e2(x[1], y[1]), mul_e2(x[2], y[2]) - c0 = add_e2(x[1], x[2]) - tmp = add_e2(y[1], y[2]) - c0 = mul_e2(c0, tmp) - c0 = sub_e2(c0, t1) - c0 = sub_e2(c0, t2) - c0 = mul_by_non_residue_e2(c0) - c0 = add_e2(c0, t0) - c1 = add_e2(x[0], x[1]) - tmp = add_e2(y[0], y[1]) - c1 = mul_e2(c1, tmp) - c1 = sub_e2(c1, t0) - c1 = sub_e2(c1, t1) - tmp = mul_by_non_residue_e2(t2) - c1 = add_e2(c1, tmp) - tmp = add_e2(x[0], x[2]) - c2 = add_e2(y[0], y[2]) - c2 = mul_e2(c2, tmp) - c2 = sub_e2(c2, t0) - c2 = sub_e2(c2, t2) - c2 = add_e2(c2, t1) - return c0, c1, c2 - def square_e6(x:((int,int),(int,int),(int,int))): - return mul_e6(x,x) - - def inv_e12(c0:((int,int),(int,int),(int,int)), c1:((int,int),(int,int),(int,int))): - t0, t1 = square_e6(c0), square_e6(c1) - tmp = mul_by_non_residue_e6(t1) - t0 = sub_e6(t0, tmp) - t1 = 
inv_e6(t0) - c0 = mul_e6(c0, t1) - c1 = mul_e6(c1, t1) - c1 = neg_e6(c1) - return [c0[0][0], c0[0][1], c0[1][0], c0[1][1], c0[2][0], c0[2][1], c1[0][0], c1[0][1], c1[1][0], c1[1][1], c1[2][0], c1[2][1]] - for i in range(ids.N_LIMBS): - for k in range(6): - c0[k]+=as_int(getattr(c0_refs[k], 'd'+str(i)), PRIME) * ids.BASE**i - c1[k]+=as_int(getattr(c1_refs[k], 'd'+str(i)), PRIME) * ids.BASE**i - p+=getattr(ids, 'P'+str(i)) * ids.BASE**i - c0 = ((c0[0],c0[1]),(c0[2],c0[3]),(c0[4],c0[5])) - c1 = ((c1[0],c1[1]),(c1[2],c1[3]),(c1[4],c1[5])) - x_inv = inv_e12(c0,c1) - e = [split(x) for x in x_inv] - for i in range(12): - for l in range(ids.N_LIMBS): - setattr(getattr(ids,f"inv{i}"),f"d{l}",e[i][l]) - %} - let check = e12.mul(x, inv); - let one = e12.one(); - let check = e12.sub(check, one); - let check_is_zero: felt = e12.is_zero(check); - assert check_is_zero = 1; - return inv; - } - - func is_zero{range_check_ptr}(x: E12*) -> felt { - let c0_is_zero = e6.is_zero(x.c0); - if (c0_is_zero == 0) { - return 0; - } - - let c1_is_zero = e6.is_zero(x.c1); - return c1_is_zero; - } // OK - - func zero{}() -> E12* { - let c0 = e6.zero(); - let c1 = e6.zero(); - tempvar res = new E12(c0, c1); - return res; - } // OK - - func one{}() -> E12* { - let c0 = e6.one(); - let c1 = e6.zero(); - tempvar res = new E12(c0, c1); - return res; - } // OK - - func frobenius{range_check_ptr}(x: E12*) -> E12* { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - - let c0B0 = e2.conjugate(x.c0.b0); - let c0B1 = e2.conjugate(x.c0.b1); - let c0B2 = e2.conjugate(x.c0.b2); - let c1B0 = e2.conjugate(x.c1.b0); - let c1B1 = e2.conjugate(x.c1.b1); - let c1B2 = e2.conjugate(x.c1.b2); - - let c0B1 = e2.mul_by_non_residue_1_power_2(c0B1); - let c0B2 = e2.mul_by_non_residue_1_power_4(c0B2); - let c1B0 = e2.mul_by_non_residue_1_power_1(c1B0); - let c1B1 = e2.mul_by_non_residue_1_power_3(c1B1); - let c1B2 = e2.mul_by_non_residue_1_power_5(c1B2); - - local c0: E6 = E6(c0B0, c0B1, c0B2); - local c1: E6 = E6(c1B0, c1B1, c1B2); - local res: E12 = E12(&c0, &c1); - return &res; - } - - func frobenius_square{range_check_ptr}(x: E12*) -> E12* { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - - let c0B0 = x.c0.b0; - let c0B1 = e2.mul_by_non_residue_2_power_2(x.c0.b1); - let c0B2 = e2.mul_by_non_residue_2_power_4(x.c0.b2); - let c1B0 = e2.mul_by_non_residue_2_power_1(x.c1.b0); - let c1B1 = e2.mul_by_non_residue_2_power_3(x.c1.b1); - let c1B2 = e2.mul_by_non_residue_2_power_5(x.c1.b2); - local c0: E6 = E6(c0B0, c0B1, c0B2); - local c1: E6 = E6(c1B0, c1B1, c1B2); - local res: E12 = E12(&c0, &c1); - return &res; - } - - func cyclotomic_square{range_check_ptr}(x: E12*) -> E12* { - // // x=(x0,x1,x2,x3,x4,x5,x6,x7) in E2^6 - // // cyclosquare(x)=(3*x4^2*u + 3*x0^2 - 2*x0, - // // 3*x2^2*u + 3*x3^2 - 2*x1, - // // 3*x5^2*u + 3*x1^2 - 2*x2, - // // 6*x1*x5*u + 2*x3, - // // 6*x0*x4 + 2*x4, - // // 6*x2*x3 + 2*x5) - - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - - let t0 = e2.square(x.c1.b1); - let t1 = e2.square(x.c0.b0); - let t6 = e2.add(x.c1.b1, x.c0.b0); - let t6 = e2.square(t6); - let t6 = e2.sub(t6, t0); - let t6 = e2.sub(t6, t1); // 2*x4*x0 - let t2 = e2.square(x.c0.b2); - let t3 = e2.square(x.c1.b0); - let t7 = e2.add(x.c0.b2, x.c1.b0); - let t7 = e2.square(t7); - let t7 = e2.sub(t7, t2); - let t7 = e2.sub(t7, t3); // 2*x2*x3 - - let t4 = e2.square(x.c1.b2); - let t5 = e2.square(x.c0.b1); - - let t8 = e2.add(x.c1.b2, x.c0.b1); - let t8 = e2.square(t8); - let t8 = e2.sub(t8, t4); - let t8 = e2.sub(t8, t5); - let t8 = 
e2.mul_by_non_residue(t8); // 2*x5*x1*u - - let t0 = e2.mul_by_non_residue(t0); - let t0 = e2.add(t0, t1); // x4^2*u + x0^2 - let t2 = e2.mul_by_non_residue(t2); - let t2 = e2.add(t2, t3); // x2^2*u + x3^2 - let t4 = e2.mul_by_non_residue(t4); - let t4 = e2.add(t4, t5); // x5^2*u + x1^2 - - let zc0b0 = e2.sub(t0, x.c0.b0); - let zc0b0 = e2.double(zc0b0); - let zc0b0 = e2.add(zc0b0, t0); - - let zc0b1 = e2.sub(t2, x.c0.b1); - let zc0b1 = e2.double(zc0b1); - let zc0b1 = e2.add(zc0b1, t2); - - let zc0b2 = e2.sub(t4, x.c0.b2); - let zc0b2 = e2.double(zc0b2); - let zc0b2 = e2.add(zc0b2, t4); - - let zc1b0 = e2.add(t8, x.c1.b0); - let zc1b0 = e2.double(zc1b0); - let zc1b0 = e2.add(zc1b0, t8); - - let zc1b1 = e2.add(t6, x.c1.b1); - let zc1b1 = e2.double(zc1b1); - let zc1b1 = e2.add(zc1b1, t6); - - let zc1b2 = e2.add(t7, x.c1.b2); - let zc1b2 = e2.double(zc1b2); - let zc1b2 = e2.add(zc1b2, t7); - - local c0: E6 = E6(zc0b0, zc0b1, zc0b2); - local c1: E6 = E6(zc1b0, zc1b1, zc1b2); - local res: E12 = E12(&c0, &c1); - return &res; - } // OK - - func n_square{range_check_ptr}(x: E12*, n: felt) -> E12* { - if (n == 0) { - return x; - } else { - let res = cyclotomic_square(x); - return n_square(res, n - 1); - } - } // OK - - // Returns x^(t/2) in E12 where - // t/2 = 7566188111470821376 // negative - func expt_half{range_check_ptr}(x: E12*) -> E12* { - alloc_locals; - let t0 = n_square(x, 15); - let t1 = n_square(t0, 32); - - let Karabina_0: E12* = decompress_Karabina(t0); - let Karabina_1: E12* = decompress_Karabina(t1); - let res = e12.mul(Karabina_0, Karabina_1); - - let Karabina_1 = n_square(Karabina_1, 9); - let res = e12.mul(res, Karabina_1); - - let Karabina_1 = n_square(Karabina_1, 3); - let res = e12.mul(res, Karabina_1); - - let Karabina_1 = n_square(Karabina_1, 2); - let res = e12.mul(res, Karabina_1); - - let Karabina_1 = cyclotomic_square(Karabina_1); - let res = e12.mul(res, Karabina_1); - - return conjugate(res); - } // OK - func decompress_Karabina{range_check_ptr}(x0: E12*) -> E12* { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - %{ - import numpy as np - def print_e2(x, n): - t0_a0 = x.a0.d0 + x.a0.d1 * ids.BASE + x.a0.d2 * ids.BASE**2 + x.a0.d3 * ids.BASE**3 - t0_a1 = x.a1.d0 + x.a1.d1 * ids.BASE + x.a1.d2 * ids.BASE**2 + x.a1.d3 * ids.BASE**3 - print(f"{n} = {np.base_repr(t0_a0,36)} + {np.base_repr(t0_a1,36)} * i") - def print_e12(id): - le=[] - le+=[id.c0.b0.a0.d0 + id.c0.b0.a0.d1 * ids.BASE + id.c0.b0.a0.d2 * ids.BASE**2 + id.c0.b0.a0.d3 * ids.BASE**3] - le+=[id.c0.b0.a1.d0 + id.c0.b0.a1.d1 * ids.BASE + id.c0.b0.a1.d2 * ids.BASE**2 + id.c0.b0.a1.d3 * ids.BASE**3] - le+=[id.c0.b1.a0.d0 + id.c0.b1.a0.d1 * ids.BASE + id.c0.b1.a0.d2 * ids.BASE**2 + id.c0.b1.a0.d3 * ids.BASE**3] - le+=[id.c0.b1.a1.d0 + id.c0.b1.a1.d1 * ids.BASE + id.c0.b1.a1.d2 * ids.BASE**2 + id.c0.b1.a1.d3 * ids.BASE**3] - le+=[id.c0.b2.a0.d0 + id.c0.b2.a0.d1 * ids.BASE + id.c0.b2.a0.d2 * ids.BASE**2 + id.c0.b2.a0.d3 * ids.BASE**3] - le+=[id.c0.b2.a1.d0 + id.c0.b2.a1.d1 * ids.BASE + id.c0.b2.a1.d2 * ids.BASE**2 + id.c0.b2.a1.d3 * ids.BASE**3] - le+=[id.c1.b0.a0.d0 + id.c1.b0.a0.d1 * ids.BASE + id.c1.b0.a0.d2 * ids.BASE**2 + id.c1.b0.a0.d3 * ids.BASE**3] - le+=[id.c1.b0.a1.d0 + id.c1.b0.a1.d1 * ids.BASE + id.c1.b0.a1.d2 * ids.BASE**2 + id.c1.b0.a1.d3 * ids.BASE**3] - le+=[id.c1.b1.a0.d0 + id.c1.b1.a0.d1 * ids.BASE + id.c1.b1.a0.d2 * ids.BASE**2 + id.c1.b1.a0.d3 * ids.BASE**3] - le+=[id.c1.b1.a1.d0 + id.c1.b1.a1.d1 * ids.BASE + id.c1.b1.a1.d2 * ids.BASE**2 + id.c1.b1.a1.d3 * ids.BASE**3] - le+=[id.c1.b2.a0.d0 + 
id.c1.b2.a0.d1 * ids.BASE + id.c1.b2.a0.d2 * ids.BASE**2 + id.c1.b2.a0.d3 * ids.BASE**3] - le+=[id.c1.b2.a1.d0 + id.c1.b2.a1.d1 * ids.BASE + id.c1.b2.a1.d2 * ids.BASE**2 + id.c1.b2.a1.d3 * ids.BASE**3] - [print('e'+str(i), np.base_repr(le[i],16)) for i in range(12)] - %} - // x0 - local t0: E2*; - local t1: E2*; - let one = e2.one(); - let is_zero = e2.is_zero(x0.c1.b2); - if (is_zero == 1) { - // g3 = 0 - let t0t = e2.mul(x0.c0.b1, x0.c1.b2); - let t0t = e2.double(t0t); - let is_zero_g2 = e2.is_zero(x0.c0.b2); - if (is_zero_g2 == 1) { - tempvar range_check_ptr = range_check_ptr; - return x0; - } else { - assert t0 = t0t; - assert t1 = x0.c0.b2; - } - - tempvar range_check_ptr = range_check_ptr; - } else { - let t0t = e2.square(x0.c0.b1); - let t1t = e2.sub(t0t, x0.c0.b2); - - let t1t = e2.double(t1t); - let t1t = e2.add(t1t, t0t); - - let t20 = e2.square(x0.c1.b2); - let t0t = e2.mul_by_non_residue(t20); - let t0t = e2.add(t0t, t1t); - let t1t = e2.double(x0.c1.b0); - let t1t = e2.double(t1t); - assert t0 = t0t; - assert t1 = t1t; - tempvar range_check_ptr = range_check_ptr; - } - let t1t = e2.inv(t1); - let x0c1b1 = e2.mul(t0, t1t); - - let t1t = e2.mul(x0.c0.b2, x0.c0.b1); - let t20 = e2.square(x0c1b1); - let t20 = e2.sub(t20, t1t); - let t20 = e2.double(t20); - let t20 = e2.sub(t20, t1t); - - let t1 = e2.mul(x0.c1.b0, x0.c1.b2); - - let t20 = e2.add(t20, t1); - let x0c0b0 = e2.mul_by_non_residue(t20); - let x0c0b0 = e2.add(x0c0b0, one); - - local res0c0: E6 = E6(x0c0b0, x0.c0.b1, x0.c0.b2); - local res0c1: E6 = E6(x0.c1.b0, x0c1b1, x0.c1.b2); - local res0: E12 = E12(&res0c0, &res0c1); - - return &res0; - } - // Returns x^t - // where t is = 15132376222941642752 // negative - func expt{range_check_ptr}(x: E12*) -> E12* { - let res = expt_half(x); - return cyclotomic_square(res); - } // OK - - func assert_E12(x: E12*, z: E12*) { - e6.assert_E6(x.c0, z.c0); - e6.assert_E6(x.c1, z.c1); - return (); - } // OK -} diff --git a/archive_tmp/bls12_381/towers/e2.cairo b/archive_tmp/bls12_381/towers/e2.cairo deleted file mode 100644 index f21dd317..00000000 --- a/archive_tmp/bls12_381/towers/e2.cairo +++ /dev/null @@ -1,488 +0,0 @@ -from src.bls12_381.fq import ( - fq_bigint4, - BigInt4, - fq_eq_zero, - fq_eq_one, - UnreducedBigInt7, - bigint4_mul, - bigint4_sq, -) -from starkware.cairo.common.registers import get_fp_and_pc -from src.bls12_381.curve import N_LIMBS, DEGREE, BASE, P0, P1, P2, P3 - -struct E2 { - a0: BigInt4*, - a1: BigInt4*, -} - -// def mul_e2(x:(int,int), y:(int,int)): -// a = (x[0] + x[1]) * (y[0] + y[1]) % p -// b, c = x[0]*y[0] % p, x[1]*y[1] % p -// return (b - c) % p, (a - b - c) % p -func mul_e2_unreduced{range_check_ptr}(x: E2*, y: E2*) -> ( - x_unreduced: UnreducedBigInt7, y_unreduced: UnreducedBigInt7 -) { - let x_s: BigInt4 = BigInt4( - x.a0.d0 + x.a1.d0, x.a0.d1 + x.a1.d1, x.a0.d2 + x.a1.d2, x.a0.d3 + x.a1.d3 - ); - let y_s: BigInt4 = BigInt4( - y.a0.d0 + y.a1.d0, y.a0.d1 + y.a1.d1, y.a0.d2 + y.a1.d2, y.a0.d3 + y.a1.d3 - ); - let (a) = bigint4_mul(x_s, y_s); - let (b) = bigint4_mul([x.a0], [y.a0]); - let (c) = bigint4_mul([x.a1], [y.a1]); - - return ( - UnreducedBigInt7( - d0=b.d0 - c.d0, - d1=b.d1 - c.d1, - d2=b.d2 - c.d2, - d3=b.d3 - c.d3, - d4=b.d4 - c.d4, - d5=b.d5 - c.d5, - d6=b.d6 - c.d6, - ), - UnreducedBigInt7( - d0=a.d0 - b.d0 - c.d0, - d1=a.d1 - b.d1 - c.d1, - d2=a.d2 - b.d2 - c.d2, - d3=a.d3 - b.d3 - c.d3, - d4=a.d4 - b.d4 - c.d4, - d5=a.d5 - b.d5 - c.d5, - d6=a.d6 - b.d6 - c.d6, - ), - ); -} - -func square_e2_unreduced{range_check_ptr}(x: E2*) -> ( - 
x_unreduced: UnreducedBigInt7, y_unreduced: UnreducedBigInt7 -) { - let x_s: BigInt4 = BigInt4( - x.a0.d0 + x.a1.d0, x.a0.d1 + x.a1.d1, x.a0.d2 + x.a1.d2, x.a0.d3 + x.a1.d3 - ); - let (a) = bigint4_sq(x_s); - let (b) = bigint4_sq([x.a0]); - let (c) = bigint4_sq([x.a1]); - - return ( - UnreducedBigInt7( - d0=b.d0 - c.d0, - d1=b.d1 - c.d1, - d2=b.d2 - c.d2, - d3=b.d3 - c.d3, - d4=b.d4 - c.d4, - d5=b.d5 - c.d5, - d6=b.d6 - c.d6, - ), - UnreducedBigInt7( - d0=a.d0 - b.d0 - c.d0, - d1=a.d1 - b.d1 - c.d1, - d2=a.d2 - b.d2 - c.d2, - d3=a.d3 - b.d3 - c.d3, - d4=a.d4 - b.d4 - c.d4, - d5=a.d5 - b.d5 - c.d5, - d6=a.d6 - b.d6 - c.d6, - ), - ); -} - -namespace e2 { - func zero{}() -> E2* { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - - local zero_bigint4: BigInt4 = BigInt4(0, 0, 0, 0); - local zero: E2 = E2(&zero_bigint4, &zero_bigint4); - return &zero; - } - func one{}() -> E2* { - tempvar one = new E2(new BigInt4(1, 0, 0, 0), new BigInt4(0, 0, 0, 0)); - return one; - } - func is_zero{}(x: E2*) -> felt { - let a0_is_zero = fq_eq_zero(x.a0); - if (a0_is_zero == 0) { - return 0; - } - - let a1_is_zero = fq_eq_zero(x.a1); - return a1_is_zero; - } - func is_one{}(x: E2*) -> felt { - let a1_is_zero = fq_eq_zero(x.a1); - if (a1_is_zero == 0) { - return 0; - } - - let a0_is_one = fq_eq_one(x.a0); - return a0_is_one; - } - func conjugate{range_check_ptr}(x: E2*) -> E2* { - alloc_locals; - let a1 = fq_bigint4.neg(x.a1); - // let res = E2(x.a0, a1); - tempvar res: E2* = new E2(x.a0, a1); - return res; - } - - func inv{range_check_ptr}(x: E2*) -> E2* { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - - local inv0: BigInt4; - local inv1: BigInt4; - %{ - from starkware.cairo.common.math_utils import as_int - assert 1 < ids.N_LIMBS <= 12 - assert ids.DEGREE == ids.N_LIMBS-1 - a0,a1,p=0,0,0 - - def split(x, degree=ids.DEGREE, base=ids.BASE): - coeffs = [] - for n in range(degree, 0, -1): - q, r = divmod(x, base ** n) - coeffs.append(q) - x = r - coeffs.append(x) - return coeffs[::-1] - - for i in range(ids.N_LIMBS): - a0+=as_int(getattr(ids.x.a0, 'd'+str(i)), PRIME) * ids.BASE**i - a1+=as_int(getattr(ids.x.a1, 'd'+str(i)), PRIME) * ids.BASE**i - p+=getattr(ids, 'P'+str(i)) * ids.BASE**i - - def inv_e2(a0:int, a1:int): - t0, t1 = (a0 * a0 % p, a1 * a1 % p) - t0 = (t0 + t1) % p - t1 = pow(t0, -1, p) - return (a0 * t1 % p, -(a1 * t1) % p) - - inverse0, inverse1 = inv_e2(a0, a1) - inv0, inv1 =split(inverse0), split(inverse1) - for i in range(ids.N_LIMBS): - setattr(ids.inv0, 'd'+str(i), inv0[i]) - setattr(ids.inv1, 'd'+str(i), inv1[i]) - %} - local inverse: E2 = E2(&inv0, &inv1); - - let check = e2.mul(x, &inverse); - let one = e2.one(); - let check = e2.sub(check, one); - let check_is_zero: felt = e2.is_zero(check); - assert check_is_zero = 1; - return &inverse; - } - func add{range_check_ptr}(x: E2*, y: E2*) -> E2* { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - - let a0 = fq_bigint4.add(x.a0, y.a0); - let a1 = fq_bigint4.add(x.a1, y.a1); - // let res = E2(a0, a1); - local res: E2 = E2(a0, a1); - return &res; - } - func add_one{range_check_ptr}(x: E2*) -> E2* { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - local one: BigInt4 = BigInt4(1, 0, 0, 0); - let a0 = fq_bigint4.add(x.a0, &one); - local res: E2 = E2(a0, x.a1); - return &res; - } - - func double{range_check_ptr}(x: E2*) -> E2* { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - - let a0 = fq_bigint4.add(x.a0, x.a0); - let a1 = fq_bigint4.add(x.a1, x.a1); - local res: E2 = E2(a0, a1); - return &res; - } - func 
neg{range_check_ptr}(x: E2*) -> E2* { - alloc_locals; - let zero_2 = e2.zero(); - let res = sub(zero_2, x); - return res; - } - func sub{range_check_ptr}(x: E2*, y: E2*) -> E2* { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - - let a0 = fq_bigint4.sub(x.a0, y.a0); - let a1 = fq_bigint4.sub(x.a1, y.a1); - local res: E2 = E2(a0, a1); - return &res; - } - func mul_by_element{range_check_ptr}(n: BigInt4*, x: E2*) -> E2* { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - - let a0 = fq_bigint4.mul(x.a0, n); - let a1 = fq_bigint4.mul(x.a1, n); - local res: E2 = E2(a0, a1); - return &res; - } - func mul{range_check_ptr}(x: E2*, y: E2*) -> E2* { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - - // Unreduced addition - // tempvar a = new BigInt4( - // x.a0.d0 + x.a1.d0, x.a0.d1 + x.a1.d1, x.a0.d2 + x.a1.d2, x.a0.d3 + x.a1.d3 - // ); - // tempvar b = new BigInt4( - // y.a0.d0 + y.a1.d0, y.a0.d1 + y.a1.d1, y.a0.d2 + y.a1.d2, y.a0.d3 + y.a1.d3 - // ); - - let a0 = fq_bigint4.add(x.a0, x.a1); - let b0 = fq_bigint4.add(y.a0, y.a1); - let a = fq_bigint4.mul(a0, b0); - let b = fq_bigint4.mul(x.a0, y.a0); - let c = fq_bigint4.mul(x.a1, y.a1); - let z_a1 = fq_bigint4.sub(a, b); - let z_a1 = fq_bigint4.sub(z_a1, c); - let z_a0 = fq_bigint4.sub(b, c); - - local res: E2 = E2(z_a0, z_a1); - return &res; - } - - func square{range_check_ptr}(x: E2*) -> E2* { - // z.A0 = (x.A0 + x.A1) * (x.A0 - x.A1) - // z.A1 = 2 * x.A0 * x.A1 - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - - let sum = fq_bigint4.add(x.a0, x.a1); - let diff = fq_bigint4.sub(x.a0, x.a1); - let a0 = fq_bigint4.mul(sum, diff); - - let mul = fq_bigint4.mul(x.a0, x.a1); - let a1 = fq_bigint4.add(mul, mul); - local res: E2 = E2(a0, a1); - return &res; - } - - // MulByNonResidue multiplies a E2 by (1,1) - func mul_by_non_residue{range_check_ptr}(x: E2*) -> E2* { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - - let a = fq_bigint4.add(x.a0, x.a1); - let a = fq_bigint4.add(a, a); // mul by 2 - let b = x.a0; // mul by 1 - let z_a1 = fq_bigint4.sub(a, b); - let z_a1 = fq_bigint4.sub(z_a1, x.a1); - let z_a0 = fq_bigint4.sub(b, x.a1); - - local res: E2 = E2(z_a0, z_a1); - return &res; - } - func mul_by_non_residue_1_power_1{range_check_ptr}(x: E2*) -> E2* { - // 3850754370037169011952147076051364057158807420970682438676050522613628423219637725072182697113062777891589506424760 - // 151655185184498381465642749684540099398075398968325446656007613510403227271200139370504932015952886146304766135027 - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - - local b0: BigInt4 = BigInt4( - d0=30918888157334040571029970872, - d1=38110497911700059231495300413, - d2=9956775014100533415029595983, - d3=7742960891664846859912986292, - ); - - local b1: BigInt4 = BigInt4( - d0=23961508344847352386299906803, - d1=73053643719720755424335321793, - d2=10870206300725050764578763631, - d3=304942890421345320673339650, - ); - - local b: E2 = E2(&b0, &b1); - - return e2.mul(x, &b); - } - - func mul_by_non_residue_1_power_2{range_check_ptr}(x: E2*) -> E2* { - // (0,4002409555221667392624310435006688643935503118305586438271171395842971157480381377015405980053539358417135540939436) - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - - local b0: BigInt4 = BigInt4(d0=0, d1=0, d2=0, d3=0); - - local b1: BigInt4 = BigInt4( - d0=24538776241284729507437128364, - d1=42550757554255812588943452139, - d2=30896359077101218988767419092, - d3=8047903782086192178990825606, - ); - - local b: E2 = E2(&b0, &b1); - - return e2.mul(x, &b); - } - - func 
mul_by_non_residue_1_power_3{range_check_ptr}(x: E2*) -> E2* { - // (1028732146235106349975324479215795277384839936929757896155643118032610843298655225875571310552543014690878354869257,1028732146235106349975324479215795277384839936929757896155643118032610843298655225875571310552543014690878354869257) - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - - local b0: BigInt4 = BigInt4( - d0=35566625740316527277988105225, - d1=37127840730814273605658450223, - d2=33368165978403992854926148446, - d3=2068538268313381196677636973, - ); - - local b1: BigInt4 = BigInt4( - d0=35566625740316527277988105225, - d1=37127840730814273605658450223, - d2=33368165978403992854926148446, - d3=2068538268313381196677636973, - ); - - local b: E2 = E2(&b0, &b1); - - return e2.mul(x, &b); - } - - func mul_by_non_residue_1_power_4{range_check_ptr}(x: E2*) -> E2* { - // 4002409555221667392624310435006688643935503118305586438271171395842971157480381377015405980053539358417135540939437 - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - - local b: BigInt4 = BigInt4( - d0=24538776241284729507437128365, - d1=42550757554255812588943452139, - d2=30896359077101218988767419092, - d3=8047903782086192178990825606, - ); - let a0 = fq_bigint4.mul(x.a0, &b); - let a1 = fq_bigint4.mul(x.a1, &b); - local res: E2 = E2(a0, a1); - return &res; - } - - func mul_by_non_residue_1_power_5{range_check_ptr}(x: E2*) -> E2* { - // (877076961050607968509681729531255177986764537961432449499635504522207616027455086505066378536590128544573588734230,3125332594171059424908108096204648978570118281977575435832422631601824034463382777937621250592425535493320683825557) alloc_locals; - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - - local b0: BigInt4 = BigInt4( - d0=11605117395469174891688198422, - d1=43302359525357855774867078766, - d2=22497959677678942090347384814, - d3=1763595377892035876004297323, - ); - - local b1: BigInt4 = BigInt4( - d0=43275279106712218065641679253, - d1=67861782106062958880963543440, - d2=77557184151410979682804925136, - d3=6284308404194156304582028618, - ); - - local b: E2 = E2(&b0, &b1); - - return e2.mul(x, &b); - } - - func mul_by_non_residue_2_power_1{range_check_ptr}(x: E2*) -> E2* { - // 793479390729215512621379701633421447060886740281060493010456487427281649075476305620758731620351 - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - - local b: BigInt4 = BigInt4( - d0=30341620260896663449892749311, - d1=68613384077165002066887170067, - d2=69158784751988702784384890858, - d3=1595500335, - ); - let a0 = fq_bigint4.mul(x.a0, &b); - let a1 = fq_bigint4.mul(x.a1, &b); - local res: E2 = E2(a0, a1); - return &res; - } - - func mul_by_non_residue_2_power_2{range_check_ptr}(x: E2*) -> E2* { - // 793479390729215512621379701633421447060886740281060493010456487427281649075476305620758731620350 - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - - local b: BigInt4 = BigInt4( - d0=30341620260896663449892749310, - d1=68613384077165002066887170067, - d2=69158784751988702784384890858, - d3=1595500335, - ); - let a0 = fq_bigint4.mul(x.a0, &b); - let a1 = fq_bigint4.mul(x.a1, &b); - local res: E2 = E2(a0, a1); - return &res; - } - - func mul_by_non_residue_2_power_3{range_check_ptr}(x: E2*) -> E2* { - // 4002409555221667393417789825735904156556882819939007885332058136124031650490837864442687629129015664037894272559786 - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - - local b: BigInt4 = BigInt4( - d0=54880396502181392957329877674, - d1=31935979117156477062286671870, - d2=20826981314825584179608359615, - 
d3=8047903782086192180586325942, - ); - let a0 = fq_bigint4.mul(x.a0, &b); - let a1 = fq_bigint4.mul(x.a1, &b); - local res: E2 = E2(a0, a1); - return &res; - } - - func mul_by_non_residue_2_power_4{range_check_ptr}(x: E2*) -> E2* { - // 4002409555221667392624310435006688643935503118305586438271171395842971157480381377015405980053539358417135540939436 - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - - local b: BigInt4 = BigInt4( - d0=24538776241284729507437128364, - d1=42550757554255812588943452139, - d2=30896359077101218988767419092, - d3=8047903782086192178990825606, - ); - let a0 = fq_bigint4.mul(x.a0, &b); - let a1 = fq_bigint4.mul(x.a1, &b); - local res: E2 = E2(a0, a1); - return &res; - } - - func mul_by_non_residue_2_power_5{range_check_ptr}(x: E2*) -> E2* { - // 4002409555221667392624310435006688643935503118305586438271171395842971157480381377015405980053539358417135540939437 - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - - local b: BigInt4 = BigInt4( - d0=24538776241284729507437128365, - d1=42550757554255812588943452139, - d2=30896359077101218988767419092, - d3=8047903782086192178990825606, - ); - let a0 = fq_bigint4.mul(x.a0, &b); - let a1 = fq_bigint4.mul(x.a1, &b); - local res: E2 = E2(a0, a1); - return &res; - } - - func assert_E2(x: E2*, z: E2*) { - assert x.a0.d0 = z.a0.d0; - assert x.a0.d1 = z.a0.d1; - assert x.a0.d2 = z.a0.d2; - assert x.a0.d3 = z.a0.d3; - assert x.a1.d0 = z.a1.d0; - assert x.a1.d1 = z.a1.d1; - assert x.a1.d2 = z.a1.d2; - assert x.a1.d3 = z.a1.d3; - return (); - } -} diff --git a/archive_tmp/bls12_381/towers/e6.cairo b/archive_tmp/bls12_381/towers/e6.cairo deleted file mode 100644 index 88e29e4a..00000000 --- a/archive_tmp/bls12_381/towers/e6.cairo +++ /dev/null @@ -1,253 +0,0 @@ -from src.bls12_381.towers.e2 import e2, E2 -from starkware.cairo.common.registers import get_fp_and_pc - -struct E6 { - b0: E2*, - b1: E2*, - b2: E2*, -} - -namespace e6 { - func add{range_check_ptr}(x: E6*, y: E6*) -> E6* { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - - let b0 = e2.add(x.b0, y.b0); - let b1 = e2.add(x.b1, y.b1); - let b2 = e2.add(x.b2, y.b2); - local res: E6 = E6(b0, b1, b2); - return &res; - } - - func sub{range_check_ptr}(x: E6*, y: E6*) -> E6* { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - - let b0 = e2.sub(x.b0, y.b0); - let b1 = e2.sub(x.b1, y.b1); - let b2 = e2.sub(x.b2, y.b2); - local res: E6 = E6(b0, b1, b2); - return &res; - } - - func double{range_check_ptr}(x: E6*) -> E6* { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - - let b0 = e2.double(x.b0); - let b1 = e2.double(x.b1); - let b2 = e2.double(x.b2); - local res: E6 = E6(b0, b1, b2); - return &res; - } - - func neg{range_check_ptr}(x: E6*) -> E6* { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - - let b0 = e2.neg(x.b0); - let b1 = e2.neg(x.b1); - let b2 = e2.neg(x.b2); - local res: E6 = E6(b0, b1, b2); - return &res; - } - - func mul{range_check_ptr}(x: E6*, y: E6*) -> E6* { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - - let t0 = e2.mul(x.b0, y.b0); - let t1 = e2.mul(x.b1, y.b1); - let t2 = e2.mul(x.b2, y.b2); - let c0 = e2.add(x.b1, x.b2); - let tmp = e2.add(y.b1, y.b2); - let c0 = e2.mul(c0, tmp); - let c0 = e2.sub(c0, t1); - let c0 = e2.sub(c0, t2); - let c0 = e2.mul_by_non_residue(c0); - let c0 = e2.add(c0, t0); - let c1 = e2.add(x.b0, x.b1); - let tmp = e2.add(y.b0, y.b1); - let c1 = e2.mul(c1, tmp); - let c1 = e2.sub(c1, t0); - let c1 = e2.sub(c1, t1); - let tmp = e2.mul_by_non_residue(t2); - let c1 = e2.add(c1, tmp); - let tmp = 
e2.add(x.b0, x.b2); - let c2 = e2.add(y.b0, y.b2); - let c2 = e2.mul(c2, tmp); - let c2 = e2.sub(c2, t0); - let c2 = e2.sub(c2, t2); - let c2 = e2.add(c2, t1); - local res: E6 = E6(c0, c1, c2); - return &res; - } - - // Square sets z to the E6 product of x,x, returns z - func square{range_check_ptr}(x: E6*) -> E6* { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - - let c4 = e2.mul(x.b0, x.b1); - let c4 = e2.double(c4); - let c5 = e2.square(x.b2); - let c1 = e2.mul_by_non_residue(c5); - let c1 = e2.add(c1, c4); - let c2 = e2.sub(c4, c5); - let c3 = e2.square(x.b0); - let c4 = e2.sub(x.b0, x.b1); - let c4 = e2.add(c4, x.b2); - let c5 = e2.mul(x.b1, x.b2); - let c5 = e2.double(c5); - let c4 = e2.square(c4); - let c0 = e2.mul_by_non_residue(c5); - let c0 = e2.add(c0, c3); - let c2 = e2.add(c2, c4); - let c2 = e2.add(c2, c5); - let c2 = e2.sub(c2, c3); - local res: E6 = E6(c0, c1, c2); - return &res; - } - - // Invert E6 element - // func inverse{range_check_ptr}(x: E6*) -> E6* { - // alloc_locals; - - // let t0 = e2.square(x.b0); - // let t1 = e2.square(x.b1); - // let t2 = e2.square(x.b2); - // let t3 = e2.mul(x.b0, x.b1); - // let t4 = e2.mul(x.b0, x.b2); - // let t5 = e2.mul(x.b1, x.b2); - // let c0 = e2.mul_by_non_residue(t5); - // let c0 = e2.neg(c0); - // let c0 = e2.add(c0, t0); - // let c1 = e2.mul_by_non_residue(t2); - // let c1 = e2.sub(c1, t3); - // let c2 = e2.sub(t1, t4); - // let t6 = e2.mul(x.b0, c0); - // let d1 = e2.mul(x.b2, c1); - // let d2 = e2.mul(x.b1, c2); - // let d1 = e2.add(d1, d2); - // let d1 = e2.mul_by_non_residue(d1); - // let t6 = e2.add(t6, d1); - // let t6 = e2.inv(t6); - // let b0 = e2.mul(c0, t6); - // let b1 = e2.mul(c1, t6); - // let b2 = e2.mul(c2, t6); - // tempvar res = new E6(b0, b1, b2); - // return res; - // } - - func mul_by_non_residue{range_check_ptr}(x: E6*) -> E6* { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - - let zB0 = x.b2; - let zB1 = x.b0; - let zB2 = x.b1; - let zB0 = e2.mul_by_non_residue(zB0); - local res: E6 = E6(zB0, zB1, zB2); - return &res; - } - - func mul_by_E2{range_check_ptr}(x: E6*, y: E2*) -> E6* { - alloc_locals; - let b0 = e2.mul(x.b0, y); - let b1 = e2.mul(x.b1, y); - let b2 = e2.mul(x.b2, y); - local res: E6 = E6(b0, b1, b2); - return &res; - } - - func mul_by_01{range_check_ptr}(x: E6*, b0: E2*, b1: E2*) -> E6* { - alloc_locals; - let a = e2.mul(x.b0, b0); - let b = e2.mul(x.b1, b1); - let tmp = e2.add(x.b1, x.b2); - let t0 = e2.mul(b1, tmp); - let t0 = e2.sub(t0, b); - let t0 = e2.mul_by_non_residue(t0); - let t0 = e2.add(t0, a); - - let tmp = e2.add(x.b0, x.b2); - let t2 = e2.mul(b0, tmp); - let t2 = e2.sub(t2, a); - let t2 = e2.add(t2, b); - - let t1 = e2.add(b0, b1); - let tmp = e2.add(x.b0, x.b1); - let t1 = e2.mul(t1, tmp); - let t1 = e2.sub(t1, a); - let t1 = e2.sub(t1, b); - - tempvar res = new E6(t0, t1, t2); - return res; - } - - // MulBy1 multiplication of E6 by sparse element (0, c1, 0) - func mul_by_1{range_check_ptr}(z: E6*) -> E6* { - alloc_locals; - let b = z.b1; - let t0 = e2.add(z.b1, z.b2); - // let t0 = e2.mul(c1, tmp); - let t0 = e2.sub(t0, b); - let t0 = e2.mul_by_non_residue(t0); - let t1 = e2.add(z.b0, z.b1); - // let t1 = e2.mul(c1, tmp); - let t1 = e2.sub(t1, b); - tempvar res = new E6(t0, t1, b); - return res; - } - - func zero{}() -> E6* { - let b0 = e2.zero(); - let b1 = e2.zero(); - let b2 = e2.zero(); - tempvar res = new E6(b0, b1, b2); - return res; - } - func one{}() -> E6* { - let b0 = e2.one(); - let b1 = e2.zero(); - let b2 = e2.zero(); - tempvar res = new E6(b0, b1, 
b2); - return res; - } - func is_zero{}(x: E6*) -> felt { - alloc_locals; - let b0_is_zero = e2.is_zero(x.b0); - - if (b0_is_zero == 0) { - return 0; - } - let b1_is_zero = e2.is_zero(x.b1); - - if (b1_is_zero == 0) { - return 0; - } - let b2_is_zero = e2.is_zero(x.b2); - return b2_is_zero; - } - func is_one{}(x: E6*) -> felt { - alloc_locals; - let b2_is_zero = e2.is_zero(x.b2); - - if (b2_is_zero == 0) { - return 0; - } - let b1_is_zero = e2.is_zero(x.b1); - - if (b1_is_zero == 0) { - return 0; - } - let b0_is_one = e2.is_one(x.b0); - return b0_is_one; - } - func assert_E6(x: E6*, z: E6*) { - e2.assert_E2(x.b0, z.b0); - e2.assert_E2(x.b1, z.b1); - e2.assert_E2(x.b2, z.b2); - return (); - } -} diff --git a/archive_tmp/bn254/g1.cairo b/archive_tmp/bn254/g1.cairo deleted file mode 100644 index 4fa16419..00000000 --- a/archive_tmp/bn254/g1.cairo +++ /dev/null @@ -1,342 +0,0 @@ -from starkware.cairo.common.cairo_secp.bigint import ( - BigInt3, - UnreducedBigInt3, - nondet_bigint3, - UnreducedBigInt5, - bigint_mul, -) -from starkware.cairo.common.cairo_secp.constants import BASE -from src.bn254.fq import ( - is_zero, - verify_zero5, - fq_bigint3, - N_LIMBS, - DEGREE, - assert_reduced_felt, - bigint_sqr, -) -from src.bn254.curve import P0, P1, P2 -from starkware.cairo.common.registers import get_fp_and_pc - -// Represents a point on the elliptic curve. -struct G1Point { - x: BigInt3, - y: BigInt3, -} - -namespace g1 { - func assert_on_curve{range_check_ptr}(pt: G1Point*) -> () { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - assert_reduced_felt(pt.x); - assert_reduced_felt(pt.y); - - let left = fq_bigint3.mul(pt.y, pt.y); - let x_sq = fq_bigint3.mul(pt.x, pt.x); - let x_cube = fq_bigint3.mul(x_sq, pt.x); - - assert left.d0 = x_cube.d0 + 3; - assert left.d1 = x_cube.d1; - assert left.d2 = x_cube.d2; - - return (); - } - func assert_equal(pt1: G1Point*, pt2: G1Point*) -> () { - assert 0 = pt1.x.d0 - pt2.x.d0; - assert 0 = pt1.x.d1 - pt2.x.d1; - assert 0 = pt1.x.d2 - pt2.x.d2; - assert 0 = pt1.y.d0 - pt2.y.d0; - assert 0 = pt1.y.d1 - pt2.y.d1; - assert 0 = pt1.y.d2 - pt2.y.d2; - return (); - } - func compute_doubling_slope{range_check_ptr}(pt: G1Point) -> (slope: BigInt3) { - // Note that y cannot be zero: assume that it is, then pt = -pt, so 2 * pt = 0, which - // contradicts the fact that the size of the curve is odd. - alloc_locals; - local slope: BigInt3; - %{ - from starkware.python.math_utils import div_mod - from src.hints.fq import bigint_pack, bigint_fill, get_p - assert 1 < ids.N_LIMBS <= 12 - assert ids.DEGREE == ids.N_LIMBS-1 - - x = bigint_pack(ids.pt.x, ids.N_LIMBS, ids.BASE) - y = bigint_pack(ids.pt.y, ids.N_LIMBS, ids.BASE) - p = get_p(ids) - - slope = div_mod(3 * x ** 2, 2 * y, p) - - bigint_fill(slope, ids.slope, ids.N_LIMBS, ids.BASE) - %} - assert_reduced_felt(slope); - - let (x_sqr: UnreducedBigInt5) = bigint_sqr(pt.x); - let (slope_y: UnreducedBigInt5) = bigint_mul(slope, pt.y); - verify_zero5( - UnreducedBigInt5( - d0=3 * x_sqr.d0 - 2 * slope_y.d0, - d1=3 * x_sqr.d1 - 2 * slope_y.d1, - d2=3 * x_sqr.d2 - 2 * slope_y.d2, - d3=3 * x_sqr.d3 - 2 * slope_y.d3, - d4=3 * x_sqr.d4 - 2 * slope_y.d4, - ), - ); - - return (slope=slope); - } - - // Returns the slope of the line connecting the two given points. - // The slope is used to compute pt0 + pt1. - // Assumption: pt0.x != pt1.x (mod field prime). 
- func compute_slope{range_check_ptr}(pt0: G1Point, pt1: G1Point) -> (slope: BigInt3) { - alloc_locals; - local slope: BigInt3; - %{ - from starkware.python.math_utils import div_mod - from src.hints.fq import bigint_pack, bigint_fill, get_p - assert 1 < ids.N_LIMBS <= 12 - p = get_p(ids) - x0 = bigint_pack(ids.pt0.x, ids.N_LIMBS, ids.BASE) - y0 = bigint_pack(ids.pt0.y, ids.N_LIMBS, ids.BASE) - x1 = bigint_pack(ids.pt1.x, ids.N_LIMBS, ids.BASE) - y1 = bigint_pack(ids.pt1.y, ids.N_LIMBS, ids.BASE) - - slope = div_mod(y0 - y1, x0 - x1, p) - - bigint_fill(slope, ids.slope, ids.N_LIMBS, ids.BASE) - %} - assert_reduced_felt(slope); - - let x_diff = BigInt3( - d0=pt0.x.d0 - pt1.x.d0, d1=pt0.x.d1 - pt1.x.d1, d2=pt0.x.d2 - pt1.x.d2 - ); - let (x_diff_slope: UnreducedBigInt5) = bigint_mul(x_diff, slope); - - verify_zero5( - UnreducedBigInt5( - d0=x_diff_slope.d0 - pt0.y.d0 + pt1.y.d0, - d1=x_diff_slope.d1 - pt0.y.d1 + pt1.y.d1, - d2=x_diff_slope.d2 - pt0.y.d2 + pt1.y.d2, - d3=x_diff_slope.d3, - d4=x_diff_slope.d4, - ), - ); - - return (slope,); - } - - // Given a point 'pt' on the elliptic curve, computes pt + pt. - func double{range_check_ptr}(pt: G1Point) -> (res: G1Point) { - alloc_locals; - if (pt.x.d0 == 0) { - if (pt.x.d1 == 0) { - if (pt.x.d2 == 0) { - return (pt,); - } - } - } - - let (slope: BigInt3) = compute_doubling_slope(pt); - let (slope_sqr: UnreducedBigInt5) = bigint_sqr(slope); - - local new_x: BigInt3; - local new_y: BigInt3; - %{ - from src.hints.fq import bigint_pack, bigint_fill, get_p - assert 1 < ids.N_LIMBS <= 12 - - p = get_p(ids) - x = bigint_pack(ids.pt.x, ids.N_LIMBS, ids.BASE) - y = bigint_pack(ids.pt.y, ids.N_LIMBS, ids.BASE) - slope = bigint_pack(ids.slope, ids.N_LIMBS, ids.BASE) - - new_x = (pow(slope, 2, p) - 2 * x) % p - new_y = (slope * (x - new_x) - y) % p - - bigint_fill(new_x, ids.new_x, ids.N_LIMBS, ids.BASE) - bigint_fill(new_y, ids.new_y, ids.N_LIMBS, ids.BASE) - %} - assert_reduced_felt(new_x); - assert_reduced_felt(new_y); - - verify_zero5( - UnreducedBigInt5( - d0=slope_sqr.d0 - new_x.d0 - 2 * pt.x.d0, - d1=slope_sqr.d1 - new_x.d1 - 2 * pt.x.d1, - d2=slope_sqr.d2 - new_x.d2 - 2 * pt.x.d2, - d3=slope_sqr.d3, - d4=slope_sqr.d4, - ), - ); - - let (x_diff_slope: UnreducedBigInt5) = bigint_mul( - BigInt3(d0=pt.x.d0 - new_x.d0, d1=pt.x.d1 - new_x.d1, d2=pt.x.d2 - new_x.d2), slope - ); - - verify_zero5( - UnreducedBigInt5( - d0=x_diff_slope.d0 - pt.y.d0 - new_y.d0, - d1=x_diff_slope.d1 - pt.y.d1 - new_y.d1, - d2=x_diff_slope.d2 - pt.y.d2 - new_y.d2, - d3=x_diff_slope.d3, - d4=x_diff_slope.d4, - ), - ); - - return (G1Point(new_x, new_y),); - } - - // Adds two points on the elliptic curve. - // Assumption: pt0.x != pt1.x (however, pt0 = pt1 = 0 is allowed). - // Note that this means that the function cannot be used if pt0 = pt1 - // (use ec_double() in this case) or pt0 = -pt1 (the result is 0 in this case). 
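
The hint inside double() above computes the affine tangent formulas new_x = λ^2 - 2x and new_y = λ*(x - new_x) - y, and the two verify_zero5 calls re-check exactly those relations on unreduced limbs. A quick self-contained check of the algebra on y^2 = x^3 + 3, using the (1, 2) generator that the G1() constant at the bottom of this file returns:

    # Affine doubling on y^2 = x^3 + 3 over the BN254 base field,
    # mirroring the hint inside g1.double and the relations checked by verify_zero5.
    p = 21888242871839275222246405745257275088696311157297823662689037894645226208583
    x, y = 1, 2                                   # the generator returned by G1()

    slope = 3 * x * x * pow(2 * y, -1, p) % p     # lambda = 3*x^2 / (2*y)
    new_x = (slope * slope - 2 * x) % p           # lambda^2 - 2*x
    new_y = (slope * (x - new_x) - y) % p         # lambda*(x - new_x) - y

    # The doubled point is still on the curve ...
    assert new_y * new_y % p == (new_x ** 3 + 3) % p
    # ... and the two residues handed to verify_zero5 are indeed 0 mod p.
    assert (slope * slope - new_x - 2 * x) % p == 0
    assert (slope * (x - new_x) - y - new_y) % p == 0
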
- func fast_ec_add{range_check_ptr}(pt0: G1Point, pt1: G1Point) -> (res: G1Point) { - alloc_locals; - if (pt0.x.d0 == 0) { - if (pt0.x.d1 == 0) { - if (pt0.x.d2 == 0) { - return (pt1,); - } - } - } - if (pt1.x.d0 == 0) { - if (pt1.x.d1 == 0) { - if (pt1.x.d2 == 0) { - return (pt0,); - } - } - } - - let (slope: BigInt3) = compute_slope(pt0, pt1); - let (slope_sqr: UnreducedBigInt5) = bigint_mul(slope, slope); - local new_x: BigInt3; - local new_y: BigInt3; - %{ - from src.hints.fq import bigint_pack, bigint_fill, get_p - assert 1 < ids.N_LIMBS <= 12 - p = get_p(ids) - x0 = bigint_pack(ids.pt0.x, ids.N_LIMBS, ids.BASE) - y0 = bigint_pack(ids.pt0.y, ids.N_LIMBS, ids.BASE) - x1 = bigint_pack(ids.pt1.x, ids.N_LIMBS, ids.BASE) - slope = bigint_pack(ids.slope, ids.N_LIMBS, ids.BASE) - - - new_x = (pow(slope, 2, p) - x0 - x1) % p - new_y = (slope * (x0 - new_x) - y0) % p - - bigint_fill(new_x, ids.new_x, ids.N_LIMBS, ids.BASE) - bigint_fill(new_y, ids.new_y, ids.N_LIMBS, ids.BASE) - %} - assert_reduced_felt(new_x); - assert_reduced_felt(new_y); - - verify_zero5( - UnreducedBigInt5( - d0=slope_sqr.d0 - new_x.d0 - pt0.x.d0 - pt1.x.d0, - d1=slope_sqr.d1 - new_x.d1 - pt0.x.d1 - pt1.x.d1, - d2=slope_sqr.d2 - new_x.d2 - pt0.x.d2 - pt1.x.d2, - d3=slope_sqr.d3, - d4=slope_sqr.d4, - ), - ); - - let (x_diff_slope: UnreducedBigInt5) = bigint_mul( - BigInt3(d0=pt0.x.d0 - new_x.d0, d1=pt0.x.d1 - new_x.d1, d2=pt0.x.d2 - new_x.d2), slope - ); - - verify_zero5( - UnreducedBigInt5( - d0=x_diff_slope.d0 - pt0.y.d0 - new_y.d0, - d1=x_diff_slope.d1 - pt0.y.d1 - new_y.d1, - d2=x_diff_slope.d2 - pt0.y.d2 - new_y.d2, - d3=x_diff_slope.d3, - d4=x_diff_slope.d4, - ), - ); - - return (G1Point(new_x, new_y),); - } - - // Same as fast_ec_add, except that the cases pt0 = ±pt1 are supported. - func add{range_check_ptr}(pt0: G1Point, pt1: G1Point) -> (res: G1Point) { - let x_diff = UnreducedBigInt3( - d0=pt0.x.d0 - pt1.x.d0, d1=pt0.x.d1 - pt1.x.d1, d2=pt0.x.d2 - pt1.x.d2 - ); - let (same_x: felt) = is_zero(x_diff); - if (same_x == 0) { - // pt0.x != pt1.x so we can use fast_ec_add. - return fast_ec_add(pt0, pt1); - } - - // We have pt0.x = pt1.x. This implies pt0.y = ±pt1.y. - // Check whether pt0.y = -pt1.y. - let y_sum = UnreducedBigInt3( - d0=pt0.y.d0 + pt1.y.d0, d1=pt0.y.d1 + pt1.y.d1, d2=pt0.y.d2 + pt1.y.d2 - ); - let (opposite_y: felt) = is_zero(y_sum); - if (opposite_y != 0) { - // pt0.y = -pt1.y. - // Note that the case pt0 = pt1 = 0 falls into this branch as well. - let ZERO_POINT = G1Point(BigInt3(0, 0, 0), BigInt3(0, 0, 0)); - return (ZERO_POINT,); - } else { - // pt0.y = pt1.y. - return double(pt0); - } - } - - // Given 0 <= m < 250, a scalar and a point on the elliptic curve, pt, - // verifies that 0 <= scalar < 2**m and returns (2**m * pt, scalar * pt). - func ec_mul_inner{range_check_ptr}(pt: G1Point, scalar: felt, m: felt) -> ( - pow2: G1Point, res: G1Point - ) { - if (m == 0) { - assert scalar = 0; - let ZERO_POINT = G1Point(BigInt3(0, 0, 0), BigInt3(0, 0, 0)); - return (pow2=pt, res=ZERO_POINT); - } - - alloc_locals; - let (double_pt: G1Point) = double(pt); - %{ memory[ap] = (ids.scalar % PRIME) % 2 %} - jmp odd if [ap] != 0, ap++; - return ec_mul_inner(pt=double_pt, scalar=scalar / 2, m=m - 1); - - odd: - let (local inner_pow2: G1Point, inner_res: G1Point) = ec_mul_inner( - pt=double_pt, scalar=(scalar - 1) / 2, m=m - 1 - ); - // Here inner_res = (scalar - 1) / 2 * double_pt = (scalar - 1) * pt. - // Assume pt != 0 and that inner_res = ±pt. 
We obtain (scalar - 1) * pt = ±pt => - // scalar - 1 = ±1 (mod N) => scalar = 0 or 2. - // In both cases (scalar - 1) / 2 cannot be in the range [0, 2**(m-1)), so we get a - // contradiction. - let (res: G1Point) = fast_ec_add(pt0=pt, pt1=inner_res); - return (pow2=inner_pow2, res=res); - } - - func scalar_mul{range_check_ptr}(pt: G1Point, scalar: BigInt3) -> (res: G1Point) { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - - let (pow2_0: G1Point, local res0: G1Point) = ec_mul_inner(pt, scalar.d0, 86); - let (pow2_1: G1Point, local res1: G1Point) = ec_mul_inner(pow2_0, scalar.d1, 86); - let (_, local res2: G1Point) = ec_mul_inner(pow2_1, scalar.d2, 82); - let (res: G1Point) = add(res0, res1); - let (res: G1Point) = add(res, res2); - - return (res,); - } - func neg{range_check_ptr}(pt: G1Point) -> (res: G1Point) { - alloc_locals; - let y = fq_bigint3.neg(pt.y); - let res: G1Point = G1Point(pt.x, y); - return (res,); - } -} - -// CONSTANTS -func G1() -> (res: G1Point) { - return (res=G1Point(BigInt3(1, 0, 0), BigInt3(2, 0, 0))); -} diff --git a/archive_tmp/bn254/g2.cairo b/archive_tmp/bn254/g2.cairo deleted file mode 100644 index 005d4880..00000000 --- a/archive_tmp/bn254/g2.cairo +++ /dev/null @@ -1,460 +0,0 @@ -from src.bn254.towers.e2 import e2, E2 - -from starkware.cairo.common.cairo_secp.bigint import ( - BigInt3, - UnreducedBigInt3, - nondet_bigint3, - UnreducedBigInt5, - bigint_mul, - bigint_to_uint256, - uint256_to_bigint, -) - -from src.bn254.fq import fq_bigint3, is_zero, verify_zero5, assert_reduced_felt, reduce_3, reduce_5 -from src.bn254.curve import N_LIMBS, DEGREE, BASE, P0, P1, P2 -from src.bn254.g1 import G1Point -from starkware.cairo.common.registers import get_fp_and_pc -from src.bn254.towers.e12 import E12full034 - -struct G2Point { - x: E2*, - y: E2*, -} - -struct E4 { - r0: E2*, - r1: E2*, -} -namespace g2 { - func assert_on_curve{range_check_ptr}(pt: G2Point*) -> () { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - assert_reduced_felt(pt.x.a0); - assert_reduced_felt(pt.x.a1); - assert_reduced_felt(pt.y.a0); - assert_reduced_felt(pt.y.a1); - - let left = e2.mul(pt.y, pt.y); - let x_sq = e2.square(pt.x); - let x_cube = e2.mul(x_sq, pt.x); - local b20: BigInt3*; - local b21: BigInt3*; - %{ - from starkware.cairo.common.cairo_secp.secp_utils import split - ids.b20 = segments.gen_arg(split(19485874751759354771024239261021720505790618469301721065564631296452457478373)) - ids.b21 = segments.gen_arg(split(266929791119991161246907387137283842545076965332900288569378510910307636690)) - %} - local b2: E2 = E2([b20], [b21]); - let right = e2.add(x_cube, &b2); - - e2.assert_E2(left, right); - return (); - } - func neg{range_check_ptr}(pt: G2Point*) -> G2Point* { - alloc_locals; - let x = pt.x; - let y = e2.neg(pt.y); - tempvar res = new G2Point(x, y); - return res; - } - func compute_doubling_slope{range_check_ptr}(pt: G2Point*) -> E2* { - // Returns the slope of the elliptic curve at the given point. - // The slope is used to compute pt + pt. - // Assumption: pt != 0. - // Note that y cannot be zero: assume that it is, then pt = -pt, so 2 * pt = 0, which - // contradicts the fact that the size of the curve is odd. 
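
scalar_mul above decomposes the BigInt3 scalar into its 86-, 86- and 82-bit limbs, runs ec_mul_inner on each, and recombines the partial results with add(): scalar*P = d0*P + d1*(2^86 * P) + d2*(2^172 * P). The sketch below mirrors that recursion over a toy additive group (integers modulo a small order) so it stays runnable without any curve code; the group and the sample scalar are illustrative only.

    # Sketch of how g1.scalar_mul recombines the three limbs of a BigInt3 scalar:
    #   scalar * P = d0 * P + d1 * (2^86 * P) + d2 * (2^172 * P)
    # ec_mul_inner returns both res = d_i * P and pow2 = 2^m * P, which seeds the next limb.
    BASE = 2 ** 86     # limb base of starkware.cairo.common.cairo_secp.bigint.BigInt3

    def ec_mul_inner(pt, scalar, m, add, dbl):
        # mirrors the recursion of g1.ec_mul_inner: returns (2^m * pt, scalar * pt)
        if m == 0:
            assert scalar == 0
            return pt, 0                          # 0 is the identity of this toy group
        double_pt = dbl(pt)
        if scalar % 2 == 0:
            return ec_mul_inner(double_pt, scalar // 2, m - 1, add, dbl)
        pow2, inner = ec_mul_inner(double_pt, (scalar - 1) // 2, m - 1, add, dbl)
        return pow2, add(pt, inner)

    order = 2 ** 61 - 1                           # toy additive group (Z/order, +)
    add = lambda a, b: (a + b) % order
    dbl = lambda a: 2 * a % order

    P = 123456789
    scalar = 0x1234567890ABCDEF1234567890ABCDEF1234567890ABCDEF123456789
    d0, d1, d2 = scalar % BASE, (scalar >> 86) % BASE, scalar >> 172

    pow2_0, r0 = ec_mul_inner(P, d0, 86, add, dbl)
    pow2_1, r1 = ec_mul_inner(pow2_0, d1, 86, add, dbl)
    _,      r2 = ec_mul_inner(pow2_1, d2, 82, add, dbl)
    assert add(add(r0, r1), r2) == scalar % order * P % order
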
- alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - - local slope_a0: BigInt3; - local slope_a1: BigInt3; - %{ - from src.hints.fq import bigint_pack, bigint_fill - from src.hints.e2 import E2 - assert 1 < ids.N_LIMBS <= 12 - p = get_p(ids) - x = E2(bigint_pack(ids.pt.x.a0, ids.N_LIMBS, ids.BASE), bigint_pack(ids.pt.x.a1, ids.N_LIMBS, ids.BASE), p) - y = E2(bigint_pack(ids.pt.y.a0, ids.N_LIMBS, ids.BASE), bigint_pack(ids.pt.y.a1, ids.N_LIMBS, ids.BASE), p) - value = (3 * x * x) / (2 * y) - bigint_fill(value.a0, ids.slope_a0, ids.N_LIMBS, ids.BASE) - bigint_fill(value.a1, ids.slope_a1, ids.N_LIMBS, ids.BASE) - %} - assert_reduced_felt(slope_a0); - assert_reduced_felt(slope_a1); - - let x0_x1: UnreducedBigInt5 = bigint_mul(pt.x.a0, pt.x.a1); - let x0_sqr: UnreducedBigInt5 = bigint_mul(pt.x.a0, pt.x.a0); - let x1_sqr: UnreducedBigInt5 = bigint_mul(pt.x.a1, pt.x.a1); - - let s0_y0: UnreducedBigInt5 = bigint_mul(slope_a0, pt.y.a0); - let s1_y1: UnreducedBigInt5 = bigint_mul(slope_a1, pt.y.a1); - - let s0_y1: UnreducedBigInt5 = bigint_mul(slope_a0, pt.y.a1); - let s1_y0: UnreducedBigInt5 = bigint_mul(slope_a1, pt.y.a0); - - tempvar TWO = 2; - tempvar THREE = 3; - // Verify real - verify_zero5( - UnreducedBigInt5( - d0=THREE * (x0_sqr.d0 - x1_sqr.d0) - TWO * (s0_y0.d0 - s1_y1.d0), - d1=THREE * (x0_sqr.d1 - x1_sqr.d1) - TWO * (s0_y0.d1 - s1_y1.d1), - d2=THREE * (x0_sqr.d2 - x1_sqr.d2) - TWO * (s0_y0.d2 - s1_y1.d2), - d3=THREE * (x0_sqr.d3 - x1_sqr.d3) - TWO * (s0_y0.d3 - s1_y1.d3), - d4=THREE * (x0_sqr.d4 - x1_sqr.d4) - TWO * (s0_y0.d4 - s1_y1.d4), - ), - ); - // Verify imaginary - verify_zero5( - UnreducedBigInt5( - d0=TWO * (THREE * x0_x1.d0 - s0_y1.d0 - s1_y0.d0), - d1=TWO * (THREE * x0_x1.d1 - s0_y1.d1 - s1_y0.d1), - d2=TWO * (THREE * x0_x1.d2 - s0_y1.d2 - s1_y0.d2), - d3=TWO * (THREE * x0_x1.d3 - s0_y1.d3 - s1_y0.d3), - d4=TWO * (THREE * x0_x1.d4 - s0_y1.d4 - s1_y0.d4), - ), - ); - - local slope: E2 = E2(a0=slope_a0, a1=slope_a1); - - return &slope; - } - // Returns the slope of the line connecting the two given points. - // The slope is used to compute pt0 + pt1. - // Assumption: pt0.x != pt1.x (mod field prime). 
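
The G2 hints above delegate the Fp2 arithmetic to a helper class (src.hints.e2.E2, defined outside this diff); on the Cairo side only component-wise relations are checked. For the doubling slope λ = 3*x^2/(2*y) with x, y in Fp2 = Fp[u]/(u^2 + 1), the two verify_zero5 calls enforce the real and imaginary parts of 3*x^2 - 2*λ*y = 0 separately. A minimal sketch with a stand-in Fp2 implementation:

    # Fp2 = Fp[u]/(u^2 + 1) for BN254; the slope is computed in the hint and Cairo only
    # re-checks 3*x^2 - 2*lambda*y == 0 componentwise:
    #   real part:  3*(x0^2 - x1^2) - 2*(s0*y0 - s1*y1) == 0  (mod p)
    #   imag part:  2*(3*x0*x1 - s0*y1 - s1*y0)         == 0  (mod p)
    # which are exactly the two verify_zero5 inputs above.
    p = 21888242871839275222246405745257275088696311157297823662689037894645226208583

    def fp2_mul(a, b):
        return ((a[0]*b[0] - a[1]*b[1]) % p, (a[0]*b[1] + a[1]*b[0]) % p)

    def fp2_inv(a):
        d = pow(a[0]*a[0] + a[1]*a[1], -1, p)     # 1 / norm(a)
        return (a[0]*d % p, -a[1]*d % p)          # conjugate divided by norm

    def fp2_scalar(k, a):
        return (k*a[0] % p, k*a[1] % p)

    # Arbitrary x, y in Fp2 with y != 0; the slope identity needs no on-curve assumption.
    x, y = (3, 7), (11, 13)
    lam = fp2_mul(fp2_scalar(3, fp2_mul(x, x)), fp2_inv(fp2_scalar(2, y)))

    x0, x1 = x; y0, y1 = y; s0, s1 = lam
    assert (3*(x0*x0 - x1*x1) - 2*(s0*y0 - s1*y1)) % p == 0   # real part
    assert (2*(3*x0*x1 - s0*y1 - s1*y0)) % p == 0             # imaginary part
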
- func compute_slope{range_check_ptr}(pt0: G2Point*, pt1: G2Point*) -> E2* { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - - local slope_a0: BigInt3; - local slope_a1: BigInt3; - %{ - from src.hints.fq import bigint_pack, bigint_fill - from src.hints.e2 import E2 - assert 1 < ids.N_LIMBS <= 12 - assert ids.DEGREE == ids.N_LIMBS-1 - p = get_p(ids) - x0 = E2(bigint_pack(ids.pt0.x.a0, ids.N_LIMBS, ids.BASE), bigint_pack(ids.pt0.x.a1, ids.N_LIMBS, ids.BASE), p) - y0 = E2(bigint_pack(ids.pt0.y.a0, ids.N_LIMBS, ids.BASE), bigint_pack(ids.pt0.y.a1, ids.N_LIMBS, ids.BASE), p) - x1 = E2(bigint_pack(ids.pt1.x.a0, ids.N_LIMBS, ids.BASE), bigint_pack(ids.pt1.x.a1, ids.N_LIMBS, ids.BASE), p) - y1 = E2(bigint_pack(ids.pt1.y.a0, ids.N_LIMBS, ids.BASE), bigint_pack(ids.pt1.y.a1, ids.N_LIMBS, ids.BASE), p) - - value = (y0 - y1) / (x0 - x1) - - bigint_fill(value.a0, ids.slope_a0, ids.N_LIMBS, ids.BASE) - bigint_fill(value.a1, ids.slope_a1, ids.N_LIMBS, ids.BASE) - %} - assert_reduced_felt(slope_a0); - assert_reduced_felt(slope_a1); - - tempvar x_diff_real: BigInt3 = BigInt3( - d0=pt0.x.a0.d0 - pt1.x.a0.d0, d1=pt0.x.a0.d1 - pt1.x.a0.d1, d2=pt0.x.a0.d2 - pt1.x.a0.d2 - ); - tempvar x_diff_imag: BigInt3 = BigInt3( - d0=pt0.x.a1.d0 - pt1.x.a1.d0, d1=pt0.x.a1.d1 - pt1.x.a1.d1, d2=pt0.x.a1.d2 - pt1.x.a1.d2 - ); - - let x_diff_slope_imag_first_term: UnreducedBigInt5 = bigint_mul(x_diff_real, slope_a1); - let x_diff_slope_imag_second_term: UnreducedBigInt5 = bigint_mul(x_diff_imag, slope_a0); - - let x_diff_real_first_term: UnreducedBigInt5 = bigint_mul(x_diff_real, slope_a0); - let x_diff_real_second_term: UnreducedBigInt5 = bigint_mul(x_diff_imag, slope_a1); - - verify_zero5( - UnreducedBigInt5( - d0=x_diff_slope_imag_first_term.d0 + x_diff_slope_imag_second_term.d0 - - pt0.y.a1.d0 + pt1.y.a1.d0, - d1=x_diff_slope_imag_first_term.d1 + x_diff_slope_imag_second_term.d1 - - pt0.y.a1.d1 + pt1.y.a1.d1, - d2=x_diff_slope_imag_first_term.d2 + x_diff_slope_imag_second_term.d2 - - pt0.y.a1.d2 + pt1.y.a1.d2, - d3=x_diff_slope_imag_first_term.d3 + x_diff_slope_imag_second_term.d3, - d4=x_diff_slope_imag_first_term.d4 + x_diff_slope_imag_second_term.d4, - ), - ); - - verify_zero5( - UnreducedBigInt5( - d0=x_diff_real_first_term.d0 - x_diff_real_second_term.d0 - pt0.y.a0.d0 + - pt1.y.a0.d0, - d1=x_diff_real_first_term.d1 - x_diff_real_second_term.d1 - pt0.y.a0.d1 + - pt1.y.a0.d1, - d2=x_diff_real_first_term.d2 - x_diff_real_second_term.d2 - pt0.y.a0.d2 + - pt1.y.a0.d2, - d3=x_diff_real_first_term.d3 - x_diff_real_second_term.d3, - d4=x_diff_real_first_term.d4 - x_diff_real_second_term.d4, - ), - ); - local slope: E2 = E2(a0=slope_a0, a1=slope_a1); - - return &slope; - } - - // DoubleStep doubles a point in affine coordinates, and evaluates the line in Miller loop - // https://eprint.iacr.org/2013/722.pdf (Section 4.3) - func double_step{range_check_ptr}(pt: G2Point*) -> (res: G2Point*, line_eval: E12full034*) { - alloc_locals; - - let (__fp__, _) = get_fp_and_pc(); - // assert_on_curve(pt); - // precomputations in p : - - // let xp_bar = fq_bigint3.neg(p.x); - // let yp_prime = fq_bigint3.inv(p.y); - // let xp_prime = fq_bigint3.mul(xp_bar, yp_prime); - // paper algo: - // let two_y = e2.double(pt.y); - // let A = e2.inv(two_y); - // let x_sq = e2.square(pt.x); - // tempvar three = new BigInt3(3, 0, 0); - // let B = e2.mul_by_element(three, x_sq); - // let C = e2.mul(A, B); // lamba : slope - let C = compute_doubling_slope(pt); - - // let D = e2.double(pt.x); - // let nx = e2.square(C); - // let nx = e2.sub(nx, D); - 
- let nx = e2.square_min_double(C, pt.x); - // let E = e2.mul(C, pt.x); - // let E = e2.sub(E, pt.y); - let E = e2.mul_sub(C, pt.x, pt.y); - - // let ny = e2.mul(C, nx); - // let ny = e2.sub(E, ny); - - let ny = e2.sub_mul(E, C, nx); - - // assert_on_curve(res); - - // let F = e2.mul_by_element(xp_prime, C); - // let G = e2.mul_by_element(yp_prime, E); - local res: G2Point = G2Point(nx, ny); - // local line_eval: E4 = E4(C, E); - tempvar nine = 9; - local line_eval034: E12full034 = E12full034( - w1=BigInt3( - C.a0.d0 - nine * C.a1.d0, C.a0.d1 - nine * C.a1.d1, C.a0.d2 - nine * C.a1.d2 - ), - w3=BigInt3( - E.a0.d0 - nine * E.a1.d0, E.a0.d1 - nine * E.a1.d1, E.a0.d2 - nine * E.a1.d2 - ), - w7=C.a1, - w9=E.a1, - ); - - return (&res, &line_eval034); - } - func add_step{range_check_ptr}(pt0: G2Point*, pt1: G2Point*) -> ( - res: G2Point*, line_eval: E12full034* - ) { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - - // precomputations in p : - // let xp_bar = fq_bigint3.neg(p.x); - // let yp_prime = fq_bigint3.inv(p.y); - // let xp_prime = fq_bigint3.mul(xp_bar, yp_prime); - // paper algo: - - let C = compute_slope(pt0, pt1); - // let D = e2.add(pt0.x, pt1.x); - // let nx = e2.square(C); - // let nx = e2.sub(nx, D); - - let nx = e2.square_min_add(C, pt0.x, pt1.x); - - // let E = e2.mul(C, pt0.x); - // let E = e2.sub(E, pt0.y); - let E = e2.mul_sub(C, pt0.x, pt0.y); - // let ny = e2.mul(C, nx); - // let ny = e2.sub(E, ny); - let ny = e2.sub_mul(E, C, nx); - // assert_on_curve(res); - - // let F = e2.mul_by_element(xp_prime, C); - // let G = e2.mul_by_element(yp_prime, E); - // let one_e2 = e2.one(); - local res: G2Point = G2Point(nx, ny); - // local line_eval: E4 = E4(C, E); - tempvar nine = 9; - local line_eval034: E12full034 = E12full034( - w1=BigInt3( - C.a0.d0 - nine * C.a1.d0, C.a0.d1 - nine * C.a1.d1, C.a0.d2 - nine * C.a1.d2 - ), - w3=BigInt3( - E.a0.d0 - nine * E.a1.d0, E.a0.d1 - nine * E.a1.d1, E.a0.d2 - nine * E.a1.d2 - ), - w7=C.a1, - w9=E.a1, - ); - return (&res, &line_eval034); - } - // Computes (p1 + p2) + p1 instead of (p1+p1) + p2 - func double_and_add_step{range_check_ptr}(p1: G2Point*, p2: G2Point*) -> ( - res: G2Point*, l1: E12full034*, l2: E12full034* - ) { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - let lambda1 = compute_slope(p1, p2); - let x3 = e2.square_min_add(lambda1, p1.x, p2.x); - // omit y3 computation - - // compute lambda2= -lambda1 - ((2*p1.y) / (x3 - p1.x)) - - let n = e2.double(p1.y); - let d = e2.sub(x3, p1.x); - let lambda2 = e2.div(n, d); - let lambda2 = e2.add(lambda2, lambda1); - let lambda2 = e2.neg(lambda2); - - // compute x4 = lambda2^2 - x1 - x3 - let x4 = e2.square_min_add(lambda2, p1.x, x3); - - // compute y4 = lambda2 * (x1 - x4) - y1 - - let y4 = e2.mul_sub0_sub1(lambda2, p1.x, x4, p1.y); - - local res: G2Point = G2Point(x4, y4); - // assert_on_curve(&res); - - let l1r1 = e2.mul_sub(lambda1, p1.x, p1.y); - let l2r1 = e2.mul_sub(lambda2, p1.x, p1.y); - // local l1: E4 = E4(lambda1, l1r1); - // local l2: E4 = E4(lambda2, l2r1); - tempvar nine = 9; - local l1034: E12full034 = E12full034( - w1=BigInt3( - lambda1.a0.d0 - nine * lambda1.a1.d0, - lambda1.a0.d1 - nine * lambda1.a1.d1, - lambda1.a0.d2 - nine * lambda1.a1.d2, - ), - w3=BigInt3( - l1r1.a0.d0 - nine * l1r1.a1.d0, - l1r1.a0.d1 - nine * l1r1.a1.d1, - l1r1.a0.d2 - nine * l1r1.a1.d2, - ), - w7=lambda1.a1, - w9=l1r1.a1, - ); - - local l2034: E12full034 = E12full034( - w1=BigInt3( - lambda2.a0.d0 - nine * lambda2.a1.d0, - lambda2.a0.d1 - nine * lambda2.a1.d1, - lambda2.a0.d2 - nine * 
lambda2.a1.d2, - ), - w3=BigInt3( - l2r1.a0.d0 - nine * l2r1.a1.d0, - l2r1.a0.d1 - nine * l2r1.a1.d1, - l2r1.a0.d2 - nine * l2r1.a1.d2, - ), - w7=lambda2.a1, - w9=l2r1.a1, - ); - - return (&res, &l1034, &l2034); - } - func line_compute{range_check_ptr}(p1: G2Point*, p2: G2Point*) -> E12full034* { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - let lambda = compute_slope(p2, p1); - let l1r1 = e2.mul_sub(lambda, p1.x, p1.y); - // local l1: E4 = E4(lambda, l1r1); - tempvar nine = 9; - local l1034: E12full034 = E12full034( - w1=BigInt3( - lambda.a0.d0 - nine * lambda.a1.d0, - lambda.a0.d1 - nine * lambda.a1.d1, - lambda.a0.d2 - nine * lambda.a1.d2, - ), - w3=BigInt3( - l1r1.a0.d0 - nine * l1r1.a1.d0, - l1r1.a0.d1 - nine * l1r1.a1.d1, - l1r1.a0.d2 - nine * l1r1.a1.d2, - ), - w7=lambda.a1, - w9=l1r1.a1, - ); - return &l1034; - } -} - -// Returns the generator point of G2 -func get_g2_generator{range_check_ptr}() -> G2Point* { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - - local g2x0: BigInt3; - local g2x1: BigInt3; - local g2y0: BigInt3; - local g2y1: BigInt3; - %{ - import subprocess - import functools - import re - from starkware.cairo.common.cairo_secp.secp_utils import split - - def rgetattr(obj, attr, *args): - def _getattr(obj, attr): - return getattr(obj, attr, *args) - return functools.reduce(_getattr, [obj] + attr.split('.')) - def rsetattr(obj, attr, val): - pre, _, post = attr.rpartition('.') - return setattr(rgetattr(obj, pre) if pre else obj, post, val) - def fill_element(element:str, value:int): - s = split(value) - for i in range(3): rsetattr(ids,element+'.d'+str(i),s[i]) - - fill_element('g2x0', 10857046999023057135944570762232829481370756359578518086990519993285655852781) - fill_element('g2x1', 11559732032986387107991004021392285783925812861821192530917403151452391805634) - fill_element('g2y0', 8495653923123431417604973247489272438418190587263600148770280649306958101930) - fill_element('g2y1', 4082367875863433681332203403145435568316851327593401208105741076214120093531) - %} - tempvar res: G2Point* = new G2Point(new E2(g2x0, g2x1), new E2(g2y0, g2y1)); - return res; -} - -func get_n_g2_generator{range_check_ptr}(n: felt) -> G2Point* { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - local g2x0: BigInt3; - local g2x1: BigInt3; - local g2y0: BigInt3; - local g2y1: BigInt3; - %{ - from starkware.cairo.common.cairo_secp.secp_utils import split - import subprocess - import functools - import re - def rgetattr(obj, attr, *args): - def _getattr(obj, attr): - return getattr(obj, attr, *args) - return functools.reduce(_getattr, [obj] + attr.split('.')) - def rsetattr(obj, attr, val): - pre, _, post = attr.rpartition('.') - return setattr(rgetattr(obj, pre) if pre else obj, post, val) - def parse_fp_elements(input_string:str): - pattern = re.compile(r'\[([^\[\]]+)\]') - substrings = pattern.findall(input_string) - sublists = [substring.split(' ') for substring in substrings] - sublists = [[int(x) for x in sublist] for sublist in sublists] - fp_elements = [x[0] + x[1]*2**64 + x[2]*2**128 + x[3]*2**192 for x in sublists] - return fp_elements - def fill_element(element:str, value:int): - s = split(value) - for i in range(3): rsetattr(ids,element+'.d'+str(i),s[i]) - - cmd = ['./tools/gnark/main', 'nG1nG2', '1', str(ids.n)] - out = subprocess.run(cmd, stdout=subprocess.PIPE).stdout.decode('utf-8') - fp_elements = parse_fp_elements(out) - assert len(fp_elements) == 6 - - fill_element('g2x0', fp_elements[2]) - fill_element('g2x1', fp_elements[3]) - fill_element('g2y0', 
fp_elements[4]) - fill_element('g2y1', fp_elements[5]) - %} - tempvar res: G2Point* = new G2Point(new E2(g2x0, g2x1), new E2(g2y0, g2y1)); - return res; -} diff --git a/archive_tmp/bn254/pairing.cairo b/archive_tmp/bn254/pairing.cairo deleted file mode 100644 index 90caa74e..00000000 --- a/archive_tmp/bn254/pairing.cairo +++ /dev/null @@ -1,912 +0,0 @@ -from starkware.cairo.common.registers import get_label_location, get_fp_and_pc -from starkware.cairo.common.alloc import alloc -from starkware.cairo.common.math import assert_nn, unsigned_div_rem as felt_divmod, assert_nn_le -from src.bn254.g1 import G1Point, g1 -from src.bn254.g2 import G2Point, g2, E4 -from src.bn254.curve import P0, P1, P2 -from src.bn254.towers.e12 import ( - E12, - e12, - E12D, - E12DU, - E11DU, - E9full, - E7full, - E12full034, - E12full01234, - e12_tricks, - w_to_gnark_reduced, - PolyAcc12, - PolyAcc034, - PolyAcc034034, - get_powers_of_z11, - ZPowers11, - eval_irreducible_poly12, - eval_E11, - eval_E12_unreduced, -) -from src.bn254.towers.e2 import E2, e2 -from src.bn254.towers.e6 import ( - E6, - E6full, - E6DirectUnreduced, - E5full, - e6, - div_trick_e6, - gnark_to_v, - v_to_gnark_reduced as v_to_gnark, - gnark_to_v_reduced, - PolyAcc6, - PolyAccSquare6, - get_powers_of_z5, - eval_E6_plus_v_unreduced, - eval_E5, - eval_irreducible_poly6, -) -from src.bn254.fq import ( - BigInt3, - fq_bigint3, - felt_to_bigint3, - fq_zero, - BASE, - N_LIMBS, - assert_reduced_felt, - Uint256, - UnreducedBigInt5, - UnreducedBigInt3, - bigint_mul, - verify_zero5, -) - -from starkware.cairo.common.cairo_builtins import PoseidonBuiltin, BitwiseBuiltin -from starkware.cairo.common.builtin_poseidon.poseidon import poseidon_hash - -from src.extension_field_tricks.fp12 import verify_12th_extension_tricks -from src.extension_field_tricks.fp6 import verify_6th_extension_tricks - -const ate_loop_count = 29793968203157093288; -const log_ate_loop_count = 63; -const naf_count = 66; - -func pair{range_check_ptr, bitwise_ptr: BitwiseBuiltin*, poseidon_ptr: PoseidonBuiltin*}( - P: G1Point, Q: G2Point -) -> E12D* { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - - g1.assert_on_curve(&P); - g2.assert_on_curve(&Q); - let (P_arr: G1Point**) = alloc(); - let (Q_arr: G2Point**) = alloc(); - assert P_arr[0] = &P; - assert Q_arr[0] = &Q; - let f = multi_miller_loop(P_arr, Q_arr, 1); - let ff = final_exponentiation(f, 1); - return ff; -} - -func pair_multi{range_check_ptr, bitwise_ptr: BitwiseBuiltin*, poseidon_ptr: PoseidonBuiltin*}( - P_arr: G1Point**, Q_arr: G2Point**, mul_by_GT: E12D, n: felt -) -> E12D* { - alloc_locals; - assert_nn_le(2, n); - multi_assert_on_curve(P_arr, Q_arr, n - 1); - let f = multi_miller_loop(P_arr, Q_arr, n); - let ff = final_exponentiation(f, 0); - return ff; -} - -func multi_assert_on_curve{range_check_ptr}(P_arr: G1Point**, Q_arr: G2Point**, index: felt) { - if (index == -1) { - return (); - } else { - g1.assert_on_curve(P_arr[index]); - g2.assert_on_curve(Q_arr[index]); - return multi_assert_on_curve(P_arr, Q_arr, index - 1); - } -} - -// ref : https://eprint.iacr.org/2022/1162 by @yelhousni -func multi_miller_loop{ - range_check_ptr, bitwise_ptr: BitwiseBuiltin*, poseidon_ptr: PoseidonBuiltin* -}(P: G1Point**, Q: G2Point**, n_points: felt) -> E12* { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - assert_nn(n_points); - - let (local Qacc: G2Point**) = alloc(); - let (local Q_neg: G2Point**) = alloc(); - let (local yInv: BigInt3*) = alloc(); - let (local xOverY: BigInt3*) = alloc(); - - with Qacc, Q_neg, yInv, 
xOverY { - initialize_arrays_and_constants(P, Q, n_points, 0); - } - - tempvar offset = n_points; - - let zero_fq: BigInt3 = fq_zero(); - let zero_fq12: E12D = e12.zero_full(); - - local poly_acc_12_f: PolyAcc12 = PolyAcc12( - xy=UnreducedBigInt3(0, 0, 0), q=[cast(&zero_fq12, E11DU*)], r=[cast(&zero_fq12, E12DU*)] - ); - local poly_acc_034_f: PolyAcc034 = PolyAcc034( - xy=UnreducedBigInt3(0, 0, 0), q=[cast(&zero_fq12, E9full*)], r=[cast(&zero_fq12, E12DU*)] - ); - local poly_acc_034034_f: PolyAcc034034 = PolyAcc034034( - xy=UnreducedBigInt3(0, 0, 0), - q=[cast(&zero_fq12, E7full*)], - r=[cast(&zero_fq12, E12full01234*)], - ); - let poly_acc_12 = &poly_acc_12_f; - let poly_acc_034 = &poly_acc_034_f; - let poly_acc_034034 = &poly_acc_034034_f; - let continuable_hash = 'GaragaBN254MillerLoop'; - local Z: BigInt3; - %{ - from src.bn254.pairing_multi_miller import multi_miller_loop, G1Point, G2Point, E2 - from starkware.cairo.common.math_utils import as_int - from src.hints.fq import get_p - n_points = ids.n_points - P_arr = [[0, 0] for _ in range(n_points)] - Q_arr = [([0, 0], [0, 0]) for _ in range(n_points)] - p = get_p(ids) - for i in range(n_points): - P_pt_ptr = memory[ids.P+i] - Q_pt_ptr = memory[ids.Q+i] - Q_x_ptr = memory[Q_pt_ptr] - Q_y_ptr = memory[Q_pt_ptr+1] - - for k in range(ids.N_LIMBS): - P_arr[i][0] = P_arr[i][0] + as_int(memory[P_pt_ptr+k], PRIME) * ids.BASE**k - P_arr[i][1] = P_arr[i][1] + as_int(memory[P_pt_ptr+ids.N_LIMBS+k], PRIME) * ids.BASE**k - Q_arr[i][0][0] = Q_arr[i][0][0] + as_int(memory[Q_x_ptr+k], PRIME) * ids.BASE**k - Q_arr[i][0][1] = Q_arr[i][0][1] + as_int(memory[Q_x_ptr+ids.N_LIMBS+k], PRIME) * ids.BASE**k - Q_arr[i][1][0] = Q_arr[i][1][0] + as_int(memory[Q_y_ptr+k], PRIME) * ids.BASE**k - Q_arr[i][1][1] = Q_arr[i][1][1] + as_int(memory[Q_y_ptr+ids.N_LIMBS+k], PRIME) * ids.BASE**k - P_arr = [G1Point(*P) for P in P_arr] - Q_arr = [G2Point(E2(*Q[0], p), E2(*Q[1], p)) for Q in Q_arr] - - print("Pre-computing miller loop hash commitment Z = poseidon('GaragaBN254MillerLoop', [(A1, B1, Q1, R1), ..., (An, Bn, Qn, Rn)])") - x, Z = multi_miller_loop(P_arr, Q_arr, ids.n_points, ids.continuable_hash) - Z_bigint3 = split(Z) - ids.Z.d0, ids.Z.d1, ids.Z.d2 = Z_bigint3[0], Z_bigint3[1], Z_bigint3[2] - %} - let z_pow1_11_ptr: ZPowers11* = get_powers_of_z11(Z); - let (_, n_is_odd) = felt_divmod(n_points, 2); - - // Compute ∏ᵢ { fᵢ_{6x₀+2,Q}(P) } - // i = 64, separately to avoid an E12 Square - // (Square(res) = 1² = 1) - - // k = 0, separately to avoid MulBy034 (res × ℓ) - // (assign line to res) - let (new_Q0: G2Point*, l1: E12full034*) = g2.double_step(Qacc[0]); - assert Qacc[offset + 0] = new_Q0; - let res_w1 = fq_bigint3.mul(xOverY[0], l1.w1); - let res_w3 = fq_bigint3.mul(yInv[0], l1.w3); - let res_w7 = fq_bigint3.mul(xOverY[0], l1.w7); - let res_w9 = fq_bigint3.mul(yInv[0], l1.w9); - - local res_init: E12full034 = E12full034(res_w1, res_w3, res_w7, res_w9); - with Qacc, Q_neg, xOverY, yInv, n_is_odd, continuable_hash, z_pow1_11_ptr, poly_acc_12, poly_acc_034, poly_acc_034034 { - let res = i64_loop(1, offset, n_points, cast(&res_init, E12D*)); - let res = e12_tricks.square(res); - // i = 63, separately to avoid a doubleStep - // (at this point Qacc = 2Q, so 2Qacc-Q=3Q is equivalent to Qacc+Q=3Q - // this means doubleAndAddStep is equivalent to addStep here) - let res = i63_loop(0, n_points, offset, res); - let offset = offset + n_points; - let (res, offset) = multi_miller_loop_inner(n_points, 62, offset, res); - - let res = final_loop(0, n_points, offset, res); - // %{ 
print(f"HASH : {ids.continuable_hash}") %} - let (local Z: BigInt3) = felt_to_bigint3(continuable_hash); - %{ print("Verifying Miller Loop hash commitment Z = continuable_hash ... ") %} - assert Z.d0 - z_pow1_11_ptr.z_1.d0 = 0; - assert Z.d1 - z_pow1_11_ptr.z_1.d1 = 0; - assert Z.d2 - z_pow1_11_ptr.z_1.d2 = 0; - %{ print("Verifying Σc_i*A_i(z)*B_i(z) == P(z)Σc_i*Q_i(z) + Σc_i*R_i(z)") %} - verify_12th_extension_tricks(); - } - %{ print("Ok! \n") %} - - let res_gnark = w_to_gnark_reduced([res]); - // %{ - // print("RESFINALMILLERLOOP:") - // print_e12(ids.res_gnark) - // %} - return res_gnark; -} -func final_loop{ - range_check_ptr, - bitwise_ptr: BitwiseBuiltin*, - poseidon_ptr: PoseidonBuiltin*, - Qacc: G2Point**, - xOverY: BigInt3*, - yInv: BigInt3*, - poly_acc_034034: PolyAcc034034*, - poly_acc_12: PolyAcc12*, - continuable_hash: felt, - z_pow1_11_ptr: ZPowers11*, -}(k: felt, n_points: felt, offset: felt, res: E12D*) -> E12D* { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - - if (k == n_points) { - return res; - } else { - let q1x = e2.conjugate(Qacc[k].x); - let q1y = e2.conjugate(Qacc[k].y); - let q1x = e2.mul_by_non_residue_1_power_2(q1x); - let q1y = e2.mul_by_non_residue_1_power_3(q1y); - local Q1: G2Point = G2Point(q1x, q1y); - - let q2x = e2.mul_by_non_residue_2_power_2(Qacc[k].x); - let q2y = e2.mul_by_non_residue_2_power_3(Qacc[k].y); - let q2y = e2.neg(q2y); - local Q2: G2Point = G2Point(q2x, q2y); - - let (new_Q: G2Point*, l1: E12full034*) = g2.add_step(Qacc[offset + k], &Q1); - assert Qacc[offset + n_points + k] = new_Q; - - let l1w1 = fq_bigint3.mul(xOverY[k], l1.w1); - let l1w3 = fq_bigint3.mul(yInv[k], l1.w3); - let l1w7 = fq_bigint3.mul(xOverY[k], l1.w7); - let l1w9 = fq_bigint3.mul(yInv[k], l1.w9); - local l1f: E12full034 = E12full034(l1w1, l1w3, l1w7, l1w9); - - let l2 = g2.line_compute(Qacc[offset + n_points + k], &Q2); - let l2w1 = fq_bigint3.mul(xOverY[k], l2.w1); - let l2w3 = fq_bigint3.mul(yInv[k], l2.w3); - let l2w7 = fq_bigint3.mul(xOverY[k], l2.w7); - let l2w9 = fq_bigint3.mul(yInv[k], l2.w9); - local l2f: E12full034 = E12full034(l2w1, l2w3, l2w7, l2w9); - - let prod_lines = e12_tricks.mul034_034(&l1f, &l2f); - let res = e12_tricks.mul01234(res, prod_lines); - - return final_loop(k + 1, n_points, offset, res); - } -} - -func multi_miller_loop_inner{ - range_check_ptr, - bitwise_ptr: BitwiseBuiltin*, - poseidon_ptr: PoseidonBuiltin*, - Qacc: G2Point**, - Q_neg: G2Point**, - xOverY: BigInt3*, - yInv: BigInt3*, - n_is_odd: felt, - poly_acc_12: PolyAcc12*, - poly_acc_034: PolyAcc034*, - poly_acc_034034: PolyAcc034034*, - continuable_hash: felt, - z_pow1_11_ptr: ZPowers11*, -}(n_points: felt, bit_index: felt, offset: felt, res: E12D*) -> (res: E12D*, offset: felt) { - alloc_locals; - let res = e12_tricks.square(res); - - if (bit_index == 0) { - // get_NAF_digit(0) = 0 - let (lines: E12full034**) = alloc(); - // %{ print(f"index = {ids.bit_index}, bit = {ids.bit_index}, offset = {ids.offset}") %} - - double_step_loop(0, n_points, offset, lines); - tempvar offset = offset + n_points; - if (n_is_odd != 0) { - let res = e12_tricks.mul034(res, lines[n_points - 1]); - let res = mul_lines_two_by_two(1, n_points, lines, res); - return (res, offset); - } else { - let res = mul_lines_two_by_two(1, n_points, lines, res); - return (res, offset); - } - } else { - let bit = get_NAF_digit(bit_index); - // %{ print(f"index = {ids.bit_index}, bit = {ids.bit}, offset = {ids.offset}") %} - if (bit == 0) { - let (lines: E12full034**) = alloc(); - - double_step_loop(0, n_points, 
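# Editor's sketch (Python; not part of the deleted file): the identity behind the
# "Verifying Σc_i*A_i(z)*B_i(z) == P(z)Σc_i*Q_i(z) + Σc_i*R_i(z)" step above.
# Each extension-field product A*B is certified by a hinted quotient/remainder
# pair (Q, R) with A*B = Q*P_irr + R as polynomials; instead of redoing the
# polynomial arithmetic in-circuit, everything is evaluated at one point z taken
# from the Poseidon transcript (Schwartz-Zippel style). Toy values below are
# illustrative only.
def poly_eval(coeffs, z, modulus):
    # Horner evaluation of sum(coeffs[i] * z**i) mod modulus (coeffs little endian).
    acc = 0
    for c in reversed(coeffs):
        acc = (acc * z + c) % modulus
    return acc

m = 13                      # toy modulus
A, B = [1, 1], [1, 1]       # A = B = 1 + x
P_irr = [1, 0, 1]           # toy "irreducible" modulus 1 + x^2
Q, R = [1], [0, 2]          # hinted decomposition: (1 + x)^2 = 1*(1 + x^2) + 2*x
z = 5                       # in the real code z is derived from continuable_hash
lhs = poly_eval(A, z, m) * poly_eval(B, z, m) % m
rhs = (poly_eval(P_irr, z, m) * poly_eval(Q, z, m) + poly_eval(R, z, m)) % m
assert lhs == rhs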
offset, lines); - tempvar offset = offset + n_points; - - if (n_is_odd != 0) { - let res = e12_tricks.mul034(res, lines[n_points - 1]); - let res = mul_lines_two_by_two(1, n_points, lines, res); - return multi_miller_loop_inner(n_points, bit_index - 1, offset, res); - } else { - let res = mul_lines_two_by_two(1, n_points, lines, res); - return multi_miller_loop_inner(n_points, bit_index - 1, offset, res); - } - } else { - if (bit == 1) { - let res = bit_1_loop(0, n_points, offset, res); - tempvar offset = offset + n_points; - return multi_miller_loop_inner(n_points, bit_index - 1, offset, res); - } else { - // (bit == 2) - let res = bit_2_loop(0, n_points, offset, res); - let offset = offset + n_points; - return multi_miller_loop_inner(n_points, bit_index - 1, offset, res); - } - } - } -} - -func bit_1_loop{ - range_check_ptr, - bitwise_ptr: BitwiseBuiltin*, - poseidon_ptr: PoseidonBuiltin*, - Qacc: G2Point**, - xOverY: BigInt3*, - yInv: BigInt3*, - poly_acc_034034: PolyAcc034034*, - poly_acc_12: PolyAcc12*, - continuable_hash: felt, - z_pow1_11_ptr: ZPowers11*, -}(k: felt, n_points: felt, offset: felt, res: E12D*) -> E12D* { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - if (k == n_points) { - return res; - } else { - let (new_Q: G2Point*, l1: E12full034*, l2: E12full034*) = g2.double_and_add_step( - Qacc[offset + k], Qacc[k] - ); - assert Qacc[offset + n_points + k] = new_Q; - - let l1w1 = fq_bigint3.mul(xOverY[k], l1.w1); - let l1w3 = fq_bigint3.mul(yInv[k], l1.w3); - let l1w7 = fq_bigint3.mul(xOverY[k], l1.w7); - let l1w9 = fq_bigint3.mul(yInv[k], l1.w9); - local l1f: E12full034 = E12full034(l1w1, l1w3, l1w7, l1w9); - - let l2w1 = fq_bigint3.mul(xOverY[k], l2.w1); - let l2w3 = fq_bigint3.mul(yInv[k], l2.w3); - let l2w7 = fq_bigint3.mul(xOverY[k], l2.w7); - let l2w9 = fq_bigint3.mul(yInv[k], l2.w9); - local l2f: E12full034 = E12full034(l2w1, l2w3, l2w7, l2w9); - let prod_lines = e12_tricks.mul034_034(&l1f, &l2f); - - let res = e12_tricks.mul01234(res, prod_lines); - let res = bit_1_loop(k + 1, n_points, offset, res); - return res; - } -} - -func bit_2_loop{ - range_check_ptr, - bitwise_ptr: BitwiseBuiltin*, - poseidon_ptr: PoseidonBuiltin*, - Qacc: G2Point**, - Q_neg: G2Point**, - xOverY: BigInt3*, - yInv: BigInt3*, - poly_acc_034034: PolyAcc034034*, - poly_acc_12: PolyAcc12*, - continuable_hash: felt, - z_pow1_11_ptr: ZPowers11*, -}(k: felt, n_points: felt, offset: felt, res: E12D*) -> E12D* { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - if (k == n_points) { - return res; - } else { - let (new_Q: G2Point*, l1: E12full034*, l2: E12full034*) = g2.double_and_add_step( - Qacc[offset + k], Q_neg[k] - ); - assert Qacc[offset + n_points + k] = new_Q; - - let l1w1 = fq_bigint3.mul(xOverY[k], l1.w1); - let l1w3 = fq_bigint3.mul(yInv[k], l1.w3); - let l1w7 = fq_bigint3.mul(xOverY[k], l1.w7); - let l1w9 = fq_bigint3.mul(yInv[k], l1.w9); - local l1f: E12full034 = E12full034(l1w1, l1w3, l1w7, l1w9); - - let l2w1 = fq_bigint3.mul(xOverY[k], l2.w1); - let l2w3 = fq_bigint3.mul(yInv[k], l2.w3); - let l2w7 = fq_bigint3.mul(xOverY[k], l2.w7); - let l2w9 = fq_bigint3.mul(yInv[k], l2.w9); - local l2f: E12full034 = E12full034(l2w1, l2w3, l2w7, l2w9); - let prod_lines = e12_tricks.mul034_034(&l1f, &l2f); - - let res = e12_tricks.mul01234(res, prod_lines); - let res = bit_2_loop(k + 1, n_points, offset, res); - return res; - } -} -func i63_loop{ - range_check_ptr, - bitwise_ptr: BitwiseBuiltin*, - poseidon_ptr: PoseidonBuiltin*, - Qacc: G2Point**, - Q_neg: G2Point**, - xOverY: BigInt3*, - 
yInv: BigInt3*, - poly_acc_034034: PolyAcc034034*, - poly_acc_12: PolyAcc12*, - continuable_hash: felt, - z_pow1_11_ptr: ZPowers11*, -}(k: felt, n_points: felt, offset: felt, res: E12D*) -> E12D* { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - if (k == n_points) { - return res; - } else { - // l2 the line passing Qacc[k] and -Q - let l2: E12full034* = g2.line_compute(Qacc[offset + k], Q_neg[k]); - // line evaluation at P[k] - - let l2w1 = fq_bigint3.mul(xOverY[k], l2.w1); - let l2w3 = fq_bigint3.mul(yInv[k], l2.w3); - let l2w7 = fq_bigint3.mul(xOverY[k], l2.w7); - let l2w9 = fq_bigint3.mul(yInv[k], l2.w9); - local l2f: E12full034 = E12full034(l2w1, l2w3, l2w7, l2w9); - // Qacc[k] ← Qacc[k]+Q[k] and - // l1 the line ℓ passing Qacc[k] and Q[k] - - let (new_Q: G2Point*, l1: E12full034*) = g2.add_step(Qacc[offset + k], Qacc[k]); - assert Qacc[offset + n_points + k] = new_Q; - - // line evaluation at P[k] - let l1w1 = fq_bigint3.mul(xOverY[k], l1.w1); - let l1w3 = fq_bigint3.mul(yInv[k], l1.w3); - let l1w7 = fq_bigint3.mul(xOverY[k], l1.w7); - let l1w9 = fq_bigint3.mul(yInv[k], l1.w9); - local l1f: E12full034 = E12full034(l1w1, l1w3, l1w7, l1w9); - - // l*l - let prod_lines = e12_tricks.mul034_034(&l1f, &l2f); - - // res = res * l*l - let res = e12_tricks.mul01234(res, prod_lines); - - return i63_loop(k + 1, n_points, offset, res); - } -} - -// Double step Qacc[offset+k] for k in [0, n_points) -// Store the doubled point in Qacc[offset+n_points+k] -// Store the line evaluation at P[k] in lines_r0[k] and lines_r1[k] -func double_step_loop{range_check_ptr, Qacc: G2Point**, xOverY: BigInt3*, yInv: BigInt3*}( - k: felt, n_points: felt, offset: felt, lines: E12full034** -) { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - if (k == n_points) { - return (); - } else { - let (new_Q: G2Point*, l1: E12full034*) = g2.double_step(Qacc[offset + k]); - assert Qacc[offset + n_points + k] = new_Q; - - let l1w1 = fq_bigint3.mul(xOverY[k], l1.w1); - let l1w3 = fq_bigint3.mul(yInv[k], l1.w3); - let l1w7 = fq_bigint3.mul(xOverY[k], l1.w7); - let l1w9 = fq_bigint3.mul(yInv[k], l1.w9); - local l1f: E12full034 = E12full034(l1w1, l1w3, l1w7, l1w9); - assert lines[k] = &l1f; - return double_step_loop(k + 1, n_points, offset, lines); - } -} - -func i64_loop{ - range_check_ptr, - bitwise_ptr: BitwiseBuiltin*, - poseidon_ptr: PoseidonBuiltin*, - Qacc: G2Point**, - xOverY: BigInt3*, - yInv: BigInt3*, - poly_acc_034: PolyAcc034*, - poly_acc_034034: PolyAcc034034*, - continuable_hash: felt, - z_pow1_11_ptr: ZPowers11*, -}(k: felt, offset: felt, n_points: felt, res: E12D*) -> E12D* { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - if (k == n_points) { - if (n_points == 1) { - local rest: E12D = E12D( - BigInt3(1, 0, 0), - res.w0, - BigInt3(0, 0, 0), - res.w1, - BigInt3(0, 0, 0), - BigInt3(0, 0, 0), - BigInt3(0, 0, 0), - res.w2, - BigInt3(0, 0, 0), - res.w3, - BigInt3(0, 0, 0), - BigInt3(0, 0, 0), - ); - return &rest; - } else { - return res; - } - } else { - let (new_Q: G2Point*, l1: E12full034*) = g2.double_step(Qacc[k]); - assert Qacc[offset + k] = new_Q; - let l1w1 = fq_bigint3.mul(xOverY[k], l1.w1); - let l1w3 = fq_bigint3.mul(yInv[k], l1.w3); - let l1w7 = fq_bigint3.mul(xOverY[k], l1.w7); - let l1w9 = fq_bigint3.mul(yInv[k], l1.w9); - local l1f: E12full034 = E12full034(l1w1, l1w3, l1w7, l1w9); - if (k == 1) { - // k = 1, separately to avoid MulBy034 (res × ℓ) - // (res is also a line at this point, so we use Mul034By034 ℓ × ℓ) - - let res_t01234 = e12_tricks.mul034_034(&l1f, cast(res, 
E12full034*)); - local rest: E12D = E12D( - res_t01234.w0, - res_t01234.w1, - res_t01234.w2, - res_t01234.w3, - res_t01234.w4, - BigInt3(0, 0, 0), - res_t01234.w6, - res_t01234.w7, - res_t01234.w8, - res_t01234.w9, - res_t01234.w10, - res_t01234.w11, - ); - return i64_loop(k + 1, offset, n_points, &rest); - } else { - let res = e12_tricks.mul034(res, &l1f); - return i64_loop(k + 1, offset, n_points, res); - } - } -} - -func mul_lines_two_by_two{ - range_check_ptr, - bitwise_ptr: BitwiseBuiltin*, - poseidon_ptr: PoseidonBuiltin*, - poly_acc_034034: PolyAcc034034*, - poly_acc_12: PolyAcc12*, - continuable_hash: felt, - z_pow1_11_ptr: ZPowers11*, -}(k: felt, n: felt, lines: E12full034**, res: E12D*) -> E12D* { - alloc_locals; - if (k == n) { - return res; - } else { - if (k == n + 1) { - return res; - } else { - let prod_lines = e12_tricks.mul034_034(lines[k], lines[k - 1]); - let res = e12_tricks.mul01234(res, prod_lines); - return mul_lines_two_by_two(k + 2, n, lines, res); - } - } -} -func initialize_arrays_and_constants{ - range_check_ptr, Qacc: G2Point**, Q_neg: G2Point**, yInv: BigInt3*, xOverY: BigInt3* -}(P: G1Point**, Q: G2Point**, n_points: felt, k: felt) { - alloc_locals; - if (k == n_points) { - return (); - } else { - let neg_Q = g2.neg(Q[k]); - let y_inv = fq_bigint3.inv(P[k].y); - let x_over_y = fq_bigint3.mul(P[k].x, y_inv); - let x_over_y = fq_bigint3.neg(x_over_y); - assert Qacc[k] = Q[k]; - assert Q_neg[k] = neg_Q; - assert yInv[k] = y_inv; - assert xOverY[k] = x_over_y; - return initialize_arrays_and_constants(P, Q, n_points, k + 1); - } -} - -func final_exponentiation{ - range_check_ptr, bitwise_ptr: BitwiseBuiltin*, poseidon_ptr: PoseidonBuiltin* -}(z: E12*, unsafe: felt) -> E12D* { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - let one: E6* = e6.one(); - local one_full: E6full = E6full( - BigInt3(1, 0, 0), - BigInt3(0, 0, 0), - BigInt3(0, 0, 0), - BigInt3(0, 0, 0), - BigInt3(0, 0, 0), - BigInt3(0, 0, 0), - ); - local z_c1: E6*; - local selector1: felt; - - if (unsafe != 0) { - assert z_c1 = z.c1; - } else { - let z_c1_is_zero = e6.is_zero(z.c1); - assert selector1 = z_c1_is_zero; - if (z_c1_is_zero != 0) { - assert z_c1 = one; - } else { - assert z_c1 = z.c1; - } - } - tempvar continuable_hash = 'GaragaBN254FinalExp'; - local Z: BigInt3; - %{ - from src.bn254.pairing_final_exp import final_exponentiation - from starkware.cairo.common.math_utils import as_int - from tools.py.extension_trick import pack_e12 - f_input = 12*[0] - input_refs =[ids.z.c0.b0.a0, ids.z.c0.b0.a1, ids.z.c0.b1.a0, ids.z.c0.b1.a1, ids.z.c0.b2.a0, ids.z.c0.b2.a1, - ids.z.c1.b0.a0, ids.z.c1.b0.a1, ids.z.c1.b1.a0, ids.z.c1.b1.a1, ids.z.c1.b2.a0, ids.z.c1.b2.a1] - - for i in range(ids.N_LIMBS): - for k in range(12): - f_input[k] += as_int(getattr(input_refs[k], "d" + str(i)), PRIME) * ids.BASE**i - f_input = pack_e12(f_input) - print("Pre-computing final exp hash commitment Z = poseidon('GaragaBN254FinalExp', [(A1, B1, Q1, R1), ..., (An, Bn, Qn, Rn)])") - _, Z = final_exponentiation(f_input, unsafe=ids.unsafe, continuable_hash=ids.continuable_hash) - Z_bigint3 = split(Z) - ids.Z.d0, ids.Z.d1, ids.Z.d2 = Z_bigint3[0], Z_bigint3[1], Z_bigint3[2] - %} - assert_reduced_felt(Z); - local zero_e6full: E6DirectUnreduced = E6DirectUnreduced( - UnreducedBigInt3(0, 0, 0), - UnreducedBigInt3(0, 0, 0), - UnreducedBigInt3(0, 0, 0), - UnreducedBigInt3(0, 0, 0), - UnreducedBigInt3(0, 0, 0), - UnreducedBigInt3(0, 0, 0), - ); - - local zero_e5full: E5full = E5full( - Uint256(0, 0), Uint256(0, 0), Uint256(0, 0), 
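# Editor's sketch (Python; not part of the deleted file): the per-point constants
# prepared by initialize_arrays_and_constants above. Every Miller-loop line
# evaluation is scaled by 1/y_P and -x_P/y_P; multiplying by the Fp factor 1/y_P
# does not change the pairing value because it is erased by the final
# exponentiation.
p = 0x30644E72E131A029B85045B68181585D97816A916871CA8D3C208C16D87CFD47

def line_scalers(px, py):
    y_inv = pow(py, -1, p)           # 1 / y_P
    x_over_y = (-px * y_inv) % p     # -x_P / y_P
    return y_inv, x_over_y

# e.g. for the BN254 G1 generator (1, 2):
y_inv, x_over_y = line_scalers(1, 2)
assert (2 * y_inv) % p == 1 and (2 * x_over_y + 1) % p == 0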
Uint256(0, 0), Uint256(0, 0) - ); - - local zero_bigint3: UnreducedBigInt3 = UnreducedBigInt3(0, 0, 0); - local poly_acc_f: PolyAcc6 = PolyAcc6(xy=zero_bigint3, q=zero_e5full, r=zero_e6full); - local poly_acc_sq_f: PolyAccSquare6 = PolyAccSquare6(xy=zero_bigint3, q=zero_e5full, r=0); - let poly_acc_sq = &poly_acc_sq_f; - let poly_acc = &poly_acc_f; - let z_pow1_5_ptr = get_powers_of_z5(Z); - with continuable_hash, poly_acc, poly_acc_sq, z_pow1_5_ptr { - // Torus compression absorbed: - // Raising e to (p⁶-1) is - // e^(p⁶) / e = (e.C0 - w*e.C1) / (e.C0 + w*e.C1) - // = (-e.C0/e.C1 + w) / (-e.C0/e.C1 - w) - // So the fraction -e.C0/e.C1 is already in the torus. - // This absorbs the torus compression in the easy part. - - let c_num_full = gnark_to_v_reduced(z.c0); - let c_num_full = e6.neg_full(c_num_full); - let z_c1_full = gnark_to_v_reduced(z_c1); - - let c = div_trick_e6(c_num_full, z_c1_full); - let t0 = e6.frobenius_square_torus_full(c); - let c = e6.mul_torus(t0, c); - // 2. Hard part (up to permutation) - // 2x₀(6x₀²+3x₀+1)(p⁴-p²+1)/r - // Duquesne and Ghammam - // https://eprint.iacr.org/2015/192.pdf - // Fuentes et al. (alg. 6) - // performed in torus compressed form - - let t0 = e6.expt_torus(c); - let t0 = e6.inverse_torus(t0); - let t0 = e6.square_torus(t0); - let t1 = e6.square_torus(t0); - let t1 = e6.mul_torus(t0, t1); - let t2 = e6.expt_torus(t1); - let t2 = e6.inverse_torus(t2); - let t3 = e6.inverse_torus(t1); - let t1 = e6.mul_torus(t2, t3); - let t3 = e6.square_torus(t2); - let t4 = e6.expt_torus(t3); - let t4 = e6.mul_torus(t1, t4); - let t3 = e6.mul_torus(t0, t4); - let t0 = e6.mul_torus(t2, t4); - let t0 = e6.mul_torus(c, t0); - let t2 = e6.frobenius_torus_full(t3); - let t0 = e6.mul_torus(t2, t0); - let t2 = e6.frobenius_square_torus_full(t4); - let t0 = e6.mul_torus(t2, t0); - let t2 = e6.inverse_torus(c); - let t2 = e6.mul_torus(t2, t3); - let t2 = e6.frobenius_cube_torus_full(t2); - - local final_res: E12D*; - if (unsafe != 0) { - let rest = e6.mul_torus(t2, t0); - let res = decompress_torus_full(rest); - assert final_res = res; - tempvar range_check_ptr = range_check_ptr; - tempvar bitwise_ptr = bitwise_ptr; - tempvar poseidon_ptr = poseidon_ptr; - tempvar continuable_hash = continuable_hash; - tempvar poly_acc = poly_acc; - tempvar poly_acc_sq = poly_acc_sq; - tempvar z_pow1_5_ptr = z_pow1_5_ptr; - } else { - let _sum = e6.add_full(t0, t2); - let is_zero = e6.is_zero_full(_sum); - local t0t: E6full*; - if (is_zero != 0) { - assert t0t = &one_full; - } else { - assert t0t = t0; - } - - if (selector1 == 0) { - if (is_zero == 0) { - let rest = e6.mul_torus(t2, t0t); - let res = decompress_torus_full(rest); - assert final_res = res; - tempvar range_check_ptr = range_check_ptr; - tempvar bitwise_ptr = bitwise_ptr; - tempvar poseidon_ptr = poseidon_ptr; - tempvar continuable_hash = continuable_hash; - tempvar poly_acc = poly_acc; - tempvar poly_acc_sq = poly_acc_sq; - tempvar z_pow1_5_ptr = z_pow1_5_ptr; - } else { - let res = e12.one_full(); - assert final_res = res; - tempvar range_check_ptr = range_check_ptr; - tempvar bitwise_ptr = bitwise_ptr; - tempvar poseidon_ptr = poseidon_ptr; - tempvar continuable_hash = continuable_hash; - tempvar poly_acc = poly_acc; - tempvar poly_acc_sq = poly_acc_sq; - tempvar z_pow1_5_ptr = z_pow1_5_ptr; - } - } else { - let res = e12.one_full(); - assert final_res = res; - tempvar range_check_ptr = range_check_ptr; - tempvar bitwise_ptr = bitwise_ptr; - tempvar poseidon_ptr = poseidon_ptr; - tempvar continuable_hash = 
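# Editor's sketch (Python; not part of the deleted file): the torus-compression
# identity quoted in the comment above, replayed in a toy quadratic extension
# F_m[w]/(w^2 - nr), where raising to m-1 plays the role of raising to p^6-1 in
# Fp12 over Fp6. The result is (t + w)/(t - w) with t = -c0/c1, so the single
# subfield element t is enough to represent it.
m, nr = 11, 2                  # toy field; 2 is a non-square mod 11

def f2_mul(a, b):
    return ((a[0] * b[0] + nr * a[1] * b[1]) % m, (a[0] * b[1] + a[1] * b[0]) % m)

def f2_inv(a):
    d = pow((a[0] * a[0] - nr * a[1] * a[1]) % m, -1, m)
    return (a[0] * d % m, -a[1] * d % m)

def f2_pow(a, e):
    res = (1, 0)
    while e:
        if e & 1:
            res = f2_mul(res, a)
        a = f2_mul(a, a)
        e >>= 1
    return res

c0, c1 = 3, 4                                   # e = c0 + c1*w, with c1 != 0
lhs = f2_pow((c0, c1), m - 1)                   # e^(m-1) = conj(e) / e
t = (-c0 * pow(c1, -1, m)) % m                  # t = -c0/c1
rhs = f2_mul((t, 1), f2_inv((t, m - 1)))        # (t + w) / (t - w)
assert lhs == rhs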
continuable_hash; - tempvar poly_acc = poly_acc; - tempvar poly_acc_sq = poly_acc_sq; - tempvar z_pow1_5_ptr = z_pow1_5_ptr; - } - } - let range_check_ptr = range_check_ptr; - let bitwise_ptr = bitwise_ptr; - let poseidon_ptr = poseidon_ptr; - let continuable_hash = continuable_hash; - let poly_acc = poly_acc; - let poly_acc_sq = poly_acc_sq; - let z_pow1_5_ptr = z_pow1_5_ptr; - - // %{ print(f"hash={ids.continuable_hash}") %} - - let (local Z: BigInt3) = felt_to_bigint3(continuable_hash); - %{ print(f"Verifying final exponentiation hash commitment Z = continuable_hash") %} - assert Z.d0 - z_pow1_5_ptr.z_1.d0 = 0; - assert Z.d1 - z_pow1_5_ptr.z_1.d1 = 0; - assert Z.d2 - z_pow1_5_ptr.z_1.d2 = 0; - %{ print(f"Verifying Σc_i*A_i(z)*B_i(z) == P(z)Σc_i*Q_i(z) + Σc_i*R_i(z)") %} - verify_6th_extension_tricks(); - %{ print(f"Ok!") %} - - return final_res; - } -} - -// decompresses x ∈ E6 to (y+w)/(y-w) ∈ E12 -func decompress_torus_full{range_check_ptr, poseidon_ptr: PoseidonBuiltin*}(x: E6full*) -> E12D* { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - local num: E12D = E12D( - w0=x.v0, - w1=BigInt3(1, 0, 0), - w2=x.v1, - w3=BigInt3(0, 0, 0), - w4=x.v2, - w5=BigInt3(0, 0, 0), - w6=x.v3, - w7=BigInt3(0, 0, 0), - w8=x.v4, - w9=BigInt3(0, 0, 0), - w10=x.v5, - w11=BigInt3(0, 0, 0), - ); - - local den: E12D = E12D( - w0=num.w0, - w1=BigInt3(-1, 0, 0), - w2=num.w2, - w3=num.w3, - w4=num.w4, - w5=num.w5, - w6=num.w6, - w7=num.w7, - w8=num.w8, - w9=num.w9, - w10=num.w10, - w11=num.w11, - ); - - let res = e12.div_full(&num, &den); - return res; -} - -// Canonical signed digit decomposition (Non-Adjacent form) of 6x₀+2 = 29793968203157093288 little endian -func get_NAF_digit(index: felt) -> felt { - let (_, pc) = get_fp_and_pc(); - - pc_label: - let data = pc + (bits - pc_label); - let res = [data + index]; - - return res; - - bits: - dw 0; - dw 0; - dw 0; - dw 1; - dw 0; - dw 1; - dw 0; - dw 2; - dw 0; - dw 0; - dw 2; - dw 0; - dw 0; - dw 0; - dw 1; - dw 0; - dw 0; - dw 2; - dw 0; - dw 2; - dw 0; - dw 0; - dw 0; - dw 1; - dw 0; - dw 2; - dw 0; - dw 0; - dw 0; - dw 0; - dw 2; - dw 0; - dw 0; - dw 1; - dw 0; - dw 2; - dw 0; - dw 0; - dw 1; - dw 0; - dw 0; - dw 0; - dw 0; - dw 0; - dw 2; - dw 0; - dw 0; - dw 2; - dw 0; - dw 1; - dw 0; - dw 2; - dw 0; - dw 0; - dw 0; - dw 2; // i = 55 - dw 0; // i = 56 - dw 2; // i = 57 - dw 0; // i = 58 - dw 0; // i = 59 - dw 0; // i = 60 - dw 1; // i = 61 - dw 0; // i = 62 - dw 2; // i = 63 - dw 0; // i = 64 - dw 1; // i = 65 -} diff --git a/archive_tmp/bn254/pairing_final_exp.py b/archive_tmp/bn254/pairing_final_exp.py deleted file mode 100644 index 1376cf93..00000000 --- a/archive_tmp/bn254/pairing_final_exp.py +++ /dev/null @@ -1,460 +0,0 @@ -from src.algebra import Polynomial -from src.algebra import PyFelt, BaseField -from tools.extension_trick import ( - gnark_to_v, - flatten, - neg_e6, - v_to_gnark, - pack_e6, - flatten, - div_e6, - mul_e6, - mul_e2, - inv_e12, - mul_e12, - pack_e12, - square_torus_e6, -) -from starkware.cairo.common.poseidon_hash import poseidon_hash - - -p = 0x30644E72E131A029B85045B68181585D97816A916871CA8D3C208C16D87CFD47 -BASE = 2**86 -DEGREE = 2 -STARK = 3618502788666131213697322783095070105623107215331596699973092056135872020481 -field = BaseField(p) - - -coeffs = [ - PyFelt(82, field), - field.zero(), - field.zero(), - PyFelt(-18 % p, field), - field.zero(), - field.zero(), - field.one(), -] -unreducible_poly = Polynomial(coeffs) - - -def split_128(a): - """Takes in value, returns uint256-ish tuple.""" - return (a & ((1 << 128) - 1), 
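# Editor's sketch (Python; not part of the deleted file): how the dw table above
# is built. It stores the non-adjacent form of 6*x0 + 2 = 29793968203157093288
# (x0 = 4965661367192848881 is the BN254 seed), little endian, with the digit -1
# encoded as 2 so it fits a felt lookup.
def naf(x):
    digits = []
    while x:
        z = 0 if x % 2 == 0 else 2 - (x % 4)   # z in {-1, 0, 1}
        digits.append(z)
        x = (x - z) // 2
    return digits                              # little endian

x0 = 4965661367192848881
loop_count = 6 * x0 + 2
assert loop_count == 29793968203157093288
digits = naf(loop_count)
assert sum(d * 2**i for i, d in enumerate(digits)) == loop_count
encoded = [d % 3 for d in digits]              # -1 -> 2, as in the dw entries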
a >> 128) - - -def split(x): - coeffs = [] - for n in range(DEGREE, 0, -1): - q, r = divmod(x, BASE**n) - coeffs.append(q) - x = r - coeffs.append(x) - return coeffs[::-1] - - -def to_fp6(x: list) -> Polynomial: - return Polynomial([PyFelt(xi, field) for xi in x]) - - -def mul_torus( - y1: list, y2: list, continuable_hash: int, y1_bigint3=None, y2_bigint3=None -): - num_min_v, continuable_hash = mul_trick_e6( - y1, y2, continuable_hash, x_bigint3=y1_bigint3, y_bigint3=y2_bigint3 - ) - num_min_v[1] = num_min_v[1] + 1 - v1_bigint3 = split(num_min_v[1] - 1) - v1_bigint3 = (v1_bigint3[0] + 1, v1_bigint3[1], v1_bigint3[2]) - - num = num_min_v - den = [y1i + y2i for y1i, y2i in zip(y1, y2)] - if y1_bigint3 is None: - y1_bigint3 = [split(x) for x in y1] - if y2_bigint3 is None: - y2_bigint3 = [split(x) for x in y2] - - den_bigint3 = [ - (y1i[0] + y2i[0], y1i[1] + y2i[1], y1i[2] + y2i[2]) - for y1i, y2i in zip(y1_bigint3, y2_bigint3) - ] - res, continuable_hash = div_trick_e6( - num, den, continuable_hash, y_bigint3=den_bigint3 - ) - return res, continuable_hash - - -def div_trick_e6( - x: list, y: list, continuable_hash: int, x_bigint3=None, y_bigint3=None -) -> (list, int): - x_gnark, y_gnark = pack_e6(v_to_gnark(x)), pack_e6(v_to_gnark(y)) - div = flatten(div_e6(x_gnark, y_gnark)) - div = gnark_to_v(div) - check, h = mul_trick_e6( - y, div, continuable_hash, x_bigint3=y_bigint3, y_bigint3=None - ) - assert x == check, f"{x} != {check}" - return div, h - - -def mul_trick_e6( - x: list, y: list, continuable_hash: int, x_bigint3=None, y_bigint3=None -) -> (list, int): - x_poly, y_poly = to_fp6(x), to_fp6(y) - z_poly = x_poly * y_poly - z_polyq = z_poly // unreducible_poly - z_polyr = z_poly % unreducible_poly - z_polyq_coeffs = z_polyq.get_coeffs() - z_polyr_coeffs = z_polyr.get_coeffs() - z_polyq_coeffs = z_polyq_coeffs + [0] * (5 - len(z_polyq_coeffs)) - z_polyr_coeffs = z_polyr_coeffs + [0] * (6 - len(z_polyr_coeffs)) - if x_bigint3 is None: - x3 = [split(e) for e in x] - else: - x3 = x_bigint3 - if y_bigint3 is None: - y3 = [split(e) for e in y] - else: - y3 = y_bigint3 - q2 = [split_128(e) for e in z_polyq_coeffs] - r3 = [split(e) for e in z_polyr_coeffs] - h = poseidon_hash(x3[0][0] * x3[0][1], continuable_hash) - h = poseidon_hash(x3[0][2] * x3[1][0], h) - h = poseidon_hash(x3[1][1] * x3[1][2], h) - h = poseidon_hash(x3[2][0] * x3[2][1], h) - h = poseidon_hash(x3[2][2] * x3[3][0], h) - h = poseidon_hash(x3[3][1] * x3[3][2], h) - h = poseidon_hash(x3[4][0] * x3[4][1], h) - h = poseidon_hash(x3[4][2] * x3[5][0], h) - h = poseidon_hash(x3[5][1] * x3[5][2], h) - h = poseidon_hash(y3[0][0] * y3[0][1], h) - h = poseidon_hash(y3[0][2] * y3[1][0], h) - h = poseidon_hash(y3[1][1] * y3[1][2], h) - h = poseidon_hash(y3[2][0] * y3[2][1], h) - h = poseidon_hash(y3[2][2] * y3[3][0], h) - h = poseidon_hash(y3[3][1] * y3[3][2], h) - h = poseidon_hash(y3[4][0] * y3[4][1], h) - h = poseidon_hash(y3[4][2] * y3[5][0], h) - h = poseidon_hash(y3[5][1] * y3[5][2], h) - h = poseidon_hash(q2[0][0] * r3[0][0], h) - h = poseidon_hash(q2[0][1] * r3[0][1], h) - h = poseidon_hash(q2[1][0] * r3[0][2], h) - h = poseidon_hash(q2[1][1] * r3[1][0], h) - h = poseidon_hash(q2[2][0] * r3[1][1], h) - h = poseidon_hash(q2[2][1] * r3[1][2], h) - h = poseidon_hash(q2[3][0] * r3[2][0], h) - h = poseidon_hash(q2[3][1] * r3[2][1], h) - h = poseidon_hash(q2[4][0] * r3[2][2], h) - h = poseidon_hash(q2[4][1] * r3[3][0], h) - h = poseidon_hash(r3[3][1] * r3[3][2], h) - h = poseidon_hash(r3[4][0] * r3[4][1], h) - h = poseidon_hash(r3[4][2] 
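# Editor's sketch (Python; not part of the deleted file): the two limb
# decompositions used by these hints. split() produces three 86-bit limbs
# (BigInt3), split_128() two 128-bit words (Uint256); both are little endian
# and recompose to the original integer.
BASE = 2**86

def split_3(x):
    return [x % BASE, (x // BASE) % BASE, x // BASE**2]

def split_2x128(x):
    return [x & ((1 << 128) - 1), x >> 128]

v = 0x30644E72E131A029B85045B68181585D97816A916871CA8D3C208C16D87CFD47 - 1
d = split_3(v)
assert d[0] + d[1] * BASE + d[2] * BASE**2 == v and all(0 <= di < BASE for di in d)
lo, hi = split_2x128(v)
assert lo + hi * 2**128 == v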
* r3[5][0], h) - h = poseidon_hash(r3[5][1] * r3[5][2], h) - - return z_polyr_coeffs, h - - -def expt_torus(x: list, continuable_hash: int) -> (list, int): - t3, continuable_hash = square_torus(x, continuable_hash) - t5, continuable_hash = square_torus(t3, continuable_hash) - result, continuable_hash = square_torus(t5, continuable_hash) - t0, continuable_hash = square_torus(result, continuable_hash) - t2, continuable_hash = mul_torus(x, t0, continuable_hash) - t0, continuable_hash = mul_torus(t3, t2, continuable_hash) - t1, continuable_hash = mul_torus(x, t0, continuable_hash) - t4, continuable_hash = mul_torus(result, t2, continuable_hash) - t6, continuable_hash = square_torus(t2, continuable_hash) - t1, continuable_hash = mul_torus(t0, t1, continuable_hash) - t0, continuable_hash = mul_torus(t3, t1, continuable_hash) - t6, continuable_hash = n_square_torus(t6, 6, continuable_hash) - t5, continuable_hash = mul_torus(t5, t6, continuable_hash) - t5, continuable_hash = mul_torus(t4, t5, continuable_hash) - t5, continuable_hash = n_square_torus(t5, 7, continuable_hash) - t4, continuable_hash = mul_torus(t4, t5, continuable_hash) - t4, continuable_hash = n_square_torus(t4, 8, continuable_hash) - t4, continuable_hash = mul_torus(t0, t4, continuable_hash) - t3, continuable_hash = mul_torus(t3, t4, continuable_hash) - t3, continuable_hash = n_square_torus(t3, 6, continuable_hash) - t2, continuable_hash = mul_torus(t2, t3, continuable_hash) - t2, continuable_hash = n_square_torus(t2, 8, continuable_hash) - t2, continuable_hash = mul_torus(t0, t2, continuable_hash) - t2, continuable_hash = n_square_torus(t2, 6, continuable_hash) - t2, continuable_hash = mul_torus(t0, t2, continuable_hash) - t2, continuable_hash = n_square_torus(t2, 10, continuable_hash) - t1, continuable_hash = mul_torus(t1, t2, continuable_hash) - t1, continuable_hash = n_square_torus(t1, 6, continuable_hash) - t0, continuable_hash = mul_torus(t0, t1, continuable_hash) - z, continuable_hash = mul_torus(result, t0, continuable_hash) - return z, continuable_hash - - -def n_square_torus(x: list, n: int, continuable_hash: int): - if n == 0: - return x, continuable_hash - else: - x, continuable_hash = square_torus(x, continuable_hash) - return n_square_torus(x, n - 1, continuable_hash) - - -def square_torus(x: list, continuable_hash: int): - x_gnark = pack_e6(v_to_gnark(x)) - sq = [int(x) for x in gnark_to_v(flatten(square_torus_e6(x_gnark)))] - v_tmp = [(2 * sq_i - x_i) % p for sq_i, x_i in zip(sq, x)] - x_bigint3 = [split(e) for e in x] - sq_bigint3 = [split(e) for e in sq] - v_tmp_bigint3 = [ - [ - (2 * sq_i[0] - x_i[0]) % STARK, - (2 * sq_i[1] - x_i[1]) % STARK, - (2 * sq_i[2] - x_i[2]) % STARK, - ] - for sq_i, x_i in zip(sq_bigint3, x_bigint3) - ] - x_poly = to_fp6(v_tmp) - y_poly = to_fp6(x) - z_poly = x_poly * y_poly - - z_polyq = z_poly // unreducible_poly - z_polyr = z_poly % unreducible_poly - z_polyq_coeffs = z_polyq.get_coeffs() - # print(f"z_polyq_coeffs={z_polyq_coeffs}") - z_polyr_coeffs = z_polyr.get_coeffs() - # print(f"z_polyr_coeffs={z_polyr_coeffs}") - x3 = v_tmp_bigint3 - y3 = x_bigint3 - q2 = [split_128(e) for e in z_polyq_coeffs] - h = poseidon_hash(x3[0][0] * x3[0][1], continuable_hash) - h = poseidon_hash(x3[0][2] * x3[1][0], h) - h = poseidon_hash(x3[1][1] * x3[1][2], h) - h = poseidon_hash(x3[2][0] * x3[2][1], h) - h = poseidon_hash(x3[2][2] * x3[3][0], h) - h = poseidon_hash(x3[3][1] * x3[3][2], h) - h = poseidon_hash(x3[4][0] * x3[4][1], h) - h = poseidon_hash(x3[4][2] * x3[5][0], h) - h = 
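# Editor's sketch (Python; not part of the deleted file): expt_torus above is a
# fixed addition chain for exponentiation by the BN254 seed x0. Replaying the
# chain on plain integer exponents (mul_torus -> add, square_torus -> double,
# n_square_torus(_, k) -> shift by k) lands exactly on x0.
def replay_exponent_chain(x=1):
    t3 = 2 * x
    t5 = 2 * t3
    result = 2 * t5
    t0 = 2 * result
    t2 = x + t0
    t0 = t3 + t2
    t1 = x + t0
    t4 = result + t2
    t6 = 2 * t2
    t1 = t0 + t1
    t0 = t3 + t1
    t6 <<= 6
    t5 = t5 + t6
    t5 = t4 + t5
    t5 <<= 7
    t4 = t4 + t5
    t4 <<= 8
    t4 = t0 + t4
    t3 = t3 + t4
    t3 <<= 6
    t2 = t2 + t3
    t2 <<= 8
    t2 = t0 + t2
    t2 <<= 6
    t2 = t0 + t2
    t2 <<= 10
    t1 = t1 + t2
    t1 <<= 6
    t0 = t0 + t1
    return result + t0

assert replay_exponent_chain() == 4965661367192848881   # BN254 seed x0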
poseidon_hash(x3[5][1] * x3[5][2], h) - h = poseidon_hash(y3[0][0] * q2[0][0], h) - h = poseidon_hash(y3[0][1] * q2[0][1], h) - h = poseidon_hash(y3[0][2] * q2[1][0], h) - h = poseidon_hash(y3[1][0] * q2[1][1], h) - h = poseidon_hash(y3[1][1] * q2[2][0], h) - h = poseidon_hash(y3[1][2] * q2[2][1], h) - h = poseidon_hash(y3[2][0] * q2[3][0], h) - h = poseidon_hash(y3[2][1] * q2[3][1], h) - h = poseidon_hash(y3[2][2] * q2[4][0], h) - h = poseidon_hash(y3[3][0] * q2[4][1], h) - h = poseidon_hash(y3[3][1] * y3[3][2], h) - h = poseidon_hash(y3[4][0] * y3[4][1], h) - h = poseidon_hash(y3[4][2] * y3[5][0], h) - h = poseidon_hash(y3[5][1] * y3[5][2], h) - - return sq, h - - -def frobenius_square_torus(x: list): - x_fr2 = [ - x[0] * 2203960485148121921418603742825762020974279258880205651967 % p, - x[1] - * 21888242871839275220042445260109153167277707414472061641714758635765020556617 - % p, - x[2] - * 21888242871839275222246405745257275088696311157297823662689037894645226208582 - % p, - x[3] * 2203960485148121921418603742825762020974279258880205651967 % p, - x[4] - * 21888242871839275220042445260109153167277707414472061641714758635765020556617 - % p, - x[5] - * 21888242871839275222246405745257275088696311157297823662689037894645226208582 - % p, - ] - return x_fr2 - - -def frobenius_torus(x: list): - x_gnark = pack_e6(v_to_gnark(x)) - t0 = (x_gnark[0][0], -x_gnark[0][1] % p) - t1 = (x_gnark[1][0], -x_gnark[1][1] % p) - t2 = (x_gnark[2][0], -x_gnark[2][1] % p) - t1 = mul_e2( - t1, - ( - 21575463638280843010398324269430826099269044274347216827212613867836435027261, - 10307601595873709700152284273816112264069230130616436755625194854815875713954, - ), # (1,9)^(2*(p-1)/6) - ) - t2 = mul_e2( - t2, - ( - 2581911344467009335267311115468803099551665605076196740867805258568234346338, - 19937756971775647987995932169929341994314640652964949448313374472400716661030, - ), # (1,9)^(4*(p-1)/6) - ) - v0 = ( - 18566938241244942414004596690298913868373833782006617400804628704885040364344, - 5722266937896532885780051958958348231143373700109372999374820235121374419868, - ) # 1 / v^((p-1)/2) - res = flatten(mul_e6((t0, t1, t2), (v0, (0, 0), (0, 0)))) - # res_bigint3 = gnark_to_v_bigint3([split(x) for x in res]) - - res = gnark_to_v(res) - return res - - -def frobenius_cube_torus(x: list): - x_gnark = pack_e6(v_to_gnark(x)) - t0 = (x_gnark[0][0], -x_gnark[0][1] % p) - t1 = (x_gnark[1][0], -x_gnark[1][1] % p) - t2 = (x_gnark[2][0], -x_gnark[2][1] % p) - t1 = mul_e2( - t1, - ( - 3772000881919853776433695186713858239009073593817195771773381919316419345261, - 2236595495967245188281701248203181795121068902605861227855261137820944008926, - ), # (1,9)^(2*(p^3-1)/6) - ) - t2 = mul_e2( - t2, - ( - 5324479202449903542726783395506214481928257762400643279780343368557297135718, - 16208900380737693084919495127334387981393726419856888799917914180988844123039, - ), # (1,9)^(4*(p^3-1)/6) - ) - v0 = ( - 10190819375481120917420622822672549775783927716138318623895010788866272024264, - 303847389135065887422783454877609941456349188919719272345083954437860409601, - ) # 1 / v^((p^3-1)/2) - res = flatten(mul_e6((t0, t1, t2), (v0, (0, 0), (0, 0)))) - res_bigint3 = gnark_to_v_bigint3([split(x) for x in res]) - res = gnark_to_v(res) - return res, res_bigint3 - - -def inverse_torus(x: list): - return [-xi % p for xi in x] - - -def decompress_torus(x: ((int, int), (int, int), (int, int))): - num = (x, ((1, 0), (0, 0), (0, 0))) - den = (x, ((-1 % p, 0), (0, 0), (0, 0))) - res = pack_e12(inv_e12(den[0], den[1])) - res = mul_e12(num, res) - return res - - 
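# Editor's sketch (Python; not part of the deleted file): the structure behind
# final_exponentiation below. BN254 is parameterized by the seed x0; the easy
# part raises to (p^6 - 1)(p^2 + 1) and the hard part handles the cyclotomic
# cofactor (p^4 - p^2 + 1)/r, which the Fuentes et al. variant computes up to
# the harmless multiple 2*x0*(6*x0^2 + 3*x0 + 1).
x0 = 4965661367192848881
p_bn = 36 * x0**4 + 36 * x0**3 + 24 * x0**2 + 6 * x0 + 1
r_bn = 36 * x0**4 + 36 * x0**3 + 18 * x0**2 + 6 * x0 + 1
assert p_bn == 0x30644E72E131A029B85045B68181585D97816A916871CA8D3C208C16D87CFD47
assert p_bn**12 - 1 == (p_bn**6 - 1) * (p_bn**2 + 1) * (p_bn**4 - p_bn**2 + 1)
assert (p_bn**4 - p_bn**2 + 1) % r_bn == 0      # r divides the cyclotomic part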
-def final_exponentiation( - z: (((int, int), (int, int), (int, int)), ((int, int), (int, int), (int, int))), - unsafe: bool, - continuable_hash: int = int.from_bytes(b"GaragaBN254FinalExp", "big"), -): - if unsafe: - z_c1 = z[1] - else: - if z[1] == ((0, 0), (0, 0), (0, 0)): - selector1 = 1 - z_c1 = ((1, 0), (0, 0), (0, 0)) - else: - selector1 = 0 - z_c1 = z[1] - - c_num_full = gnark_to_v(flatten(neg_e6(z[0]))) - z_c1_full = gnark_to_v(flatten(z_c1)) - - c, continuable_hash = div_trick_e6(c_num_full, z_c1_full, continuable_hash) - t0 = frobenius_square_torus(c) - c, continuable_hash = mul_torus(t0, c, continuable_hash) - t0, continuable_hash = expt_torus(c, continuable_hash) - t0 = inverse_torus(t0) - t0, continuable_hash = square_torus(t0, continuable_hash) - t1, continuable_hash = square_torus(t0, continuable_hash) - t1, continuable_hash = mul_torus(t0, t1, continuable_hash) - t2, continuable_hash = expt_torus(t1, continuable_hash) - t2 = inverse_torus(t2) - t3 = inverse_torus(t1) - t1, continuable_hash = mul_torus(t2, t3, continuable_hash) - t3, continuable_hash = square_torus(t2, continuable_hash) - t4, continuable_hash = expt_torus(t3, continuable_hash) - t4, continuable_hash = mul_torus(t1, t4, continuable_hash) - t3, continuable_hash = mul_torus(t0, t4, continuable_hash) - t0, continuable_hash = mul_torus(t2, t4, continuable_hash) - t0, continuable_hash = mul_torus(c, t0, continuable_hash) - t2, t2_bigint3 = frobenius_torus(t3) - t0, continuable_hash = mul_torus(t2, t0, continuable_hash, y1_bigint3=t2_bigint3) - t2 = frobenius_square_torus(t4) - t0, continuable_hash = mul_torus(t2, t0, continuable_hash) - t2 = inverse_torus(c) - t2, continuable_hash = mul_torus(t2, t3, continuable_hash) - t2, t2_bigint3 = frobenius_cube_torus(t2) - if unsafe: - rest, continuable_hash = mul_torus( - t2, t0, continuable_hash, y1_bigint3=t2_bigint3 - ) - res = pack_e6(v_to_gnark(rest)) - res = decompress_torus(res) - return res, continuable_hash - else: - _sum = [t0i + t2i % p for t0i, t2i in zip(t0, t2)] - is_zero = all([e == 0 for e in _sum]) - if is_zero: - t0t = Polynomial( - [ - PyFelt(1, field), - field.zero(), - field.zero(), - field.zero(), - field.zero(), - field.zero(), - ] - ) - else: - t0t = t0 - - if selector1 == 0: - if is_zero == 0: - rest, continuable_hash = mul_torus( - t2, t0t, continuable_hash, y1_bigint3=t2_bigint3 - ) - res = v_to_gnark(rest) - res = decompress_torus(pack_e6(res)) - return res, continuable_hash - else: - res = (((1, 0), (0, 0), (0, 0)), ((0, 0), (0, 0), (0, 0))) - return res, continuable_hash - else: - res = (((1, 0), (0, 0), (0, 0)), ((0, 0), (0, 0), (0, 0))) - return res, continuable_hash - - -if __name__ == "__main__": - x = [ - 15631577932152315104652445523700417040601500707877284609546312920354446056447, - 1274881022144191920838043222130710344172476924365725732436425248566978625605, - 14374765490310691286872600100687989211994071432725749506715026469291207213364, - 19232683452852686150799946178434694116955802884971349389480427332156028484678, - 4711060662209480322403082802390043737109415216436721343938907246739585294619, - 12628528420035269572171509623830053865991813551619118245630623189571187704212, - 6132046658265970172317265843030970288646178101127187503319861429480398294166, - 696877141756131447795834834192003128716698847022516178077777960435426094082, - 19968037526512504126402565293093453753511856148614571257107664150629413134903, - 19711115225256248898674588007895864056457997172157519591556283079102178159639, - 
4264731731400846354398198898948247059528185839861404225131520284631392266215, - 3153660797904284033741194851243498835351306539671786555576214661552094399141, - ] - z = [ - 17264119758069723980713015158403419364912226240334615592005620718956030922389, - 1300711225518851207585954685848229181392358478699795190245709208408267917898, - 8894217292938489450175280157304813535227569267786222825147475294561798790624, - 1829859855596098509359522796979920150769875799037311140071969971193843357227, - 4968700049505451466697923764727215585075098085662966862137174841375779106779, - 12814315002058128940449527172080950701976819591738376253772993495204862218736, - 4233474252585134102088637248223601499779641130562251948384759786370563844606, - 9420544134055737381096389798327244442442230840902787283326002357297404128074, - 13457906610892676317612909831857663099224588803620954529514857102808143524905, - 5122435115068592725432309312491733755581898052459744089947319066829791570839, - 8891987925005301465158626530377582234132838601606565363865129986128301774627, - 440796048150724096437130979851431985500142692666486515369083499585648077975, - ] - x = pack_e12(x) - f, continuable_hash = final_exponentiation(x, True) - print(f"f = {f}") - print(f"z = {z}") - print(f"hash={continuable_hash}") - assert pack_e12(z) == f diff --git a/archive_tmp/bn254/pairing_multi_miller.py b/archive_tmp/bn254/pairing_multi_miller.py deleted file mode 100644 index 06f3f658..00000000 --- a/archive_tmp/bn254/pairing_multi_miller.py +++ /dev/null @@ -1,775 +0,0 @@ -from algebra import Polynomial -from algebra import PyFelt, BaseField -from starkware.cairo.common.poseidon_hash import poseidon_hash -from hints.io import bigint_split, split_128 -from hints.tower_backup import E2 -from definitions import IRREDUCIBLE_POLY_12 -from dataclasses import dataclass -from tools.extension_trick import w_to_gnark - -import numpy as np - -p = 0x30644E72E131A029B85045B68181585D97816A916871CA8D3C208C16D87CFD47 -STARK = 3618502788666131213697322783095070105623107215331596699973092056135872020481 -field = BaseField(p) -N_LIMBS = 3 -BASE = 2**86 - - -def NAF(x): - if x == 0: - return [] - z = 0 if x % 2 == 0 else 2 - (x % 4) - return NAF((x - z) // 2) + [z] - - -BITS = NAF(29793968203157093288)[::-1] - - -def mul_by_non_residue_k_pow_j(x: E2, k: int, j: int): - if (k, j) == (1, 2): - tmp = E2( - 21575463638280843010398324269430826099269044274347216827212613867836435027261, - 10307601595873709700152284273816112264069230130616436755625194854815875713954, - x.p, - ) - return x * tmp - if (k, j) == (1, 3): - tmp = E2( - 2821565182194536844548159561693502659359617185244120367078079554186484126554, - 3505843767911556378687030309984248845540243509899259641013678093033130930403, - x.p, - ) - return x * tmp - if (k, j) == (2, 2): - tmp = 21888242871839275220042445260109153167277707414472061641714758635765020556616 - return E2(x.a0 * tmp % x.p, x.a1 * tmp % x.p, x.p) - if (k, j) == (2, 3): - tmp = 21888242871839275222246405745257275088696311157297823662689037894645226208582 - return E2(x.a0 * tmp % x.p, x.a1 * tmp % x.p, x.p) - else: - raise NotImplementedError - - -@dataclass -class BigInt3: - d0: int - d1: int - d2: int - - -@dataclass -class E12_034: - w1: int - w3: int - w7: int - w9: int - - def to_E12(self): - return E12( - 1, - self.w1, - 0, - self.w3, - 0, - 0, - 0, - self.w7, - 0, - self.w9, - 0, - 0, - ) - - def to_poly(self): - return Polynomial( - [ - field.one(), - PyFelt(self.w1, field), - field.zero(), - PyFelt(self.w3, field), - field.zero(), - 
field.zero(), - field.zero(), - PyFelt(self.w7, field), - field.zero(), - PyFelt(self.w9, field), - field.zero(), - field.zero(), - ] - ) - - def to_bigint3(self): - pow_idw = ["1", "3", "7", "9"] - coeffs = [getattr(self, f"w{i}") for i in pow_idw] - return [bigint_split(x, N_LIMBS, BASE) for x in coeffs] - - def hash(self, continuable_hash: int): - x3 = self.to_bigint3() - h = poseidon_hash(x3[0][0] * x3[0][1], continuable_hash) - h = poseidon_hash(x3[0][2] * x3[1][0], h) - h = poseidon_hash(x3[1][1] * x3[1][2], h) - h = poseidon_hash(x3[2][0] * x3[2][1], h) - h = poseidon_hash(x3[2][2] * x3[3][0], h) - h = poseidon_hash(x3[3][1] * x3[3][2], h) - return h - - -@dataclass -class E12_01234: - w0: int - w1: int - w2: int - w3: int - w4: int - w6: int - w7: int - w8: int - w9: int - w10: int - w11: int - - def to_E12(self): - return E12( - self.w0, - self.w1, - self.w2, - self.w3, - self.w4, - 0, - self.w6, - self.w7, - self.w8, - self.w9, - self.w10, - self.w11, - ) - - def to_poly(self): - return Polynomial( - [ - PyFelt(self.w0, field), - PyFelt(self.w1, field), - PyFelt(self.w2, field), - PyFelt(self.w3, field), - PyFelt(self.w4, field), - field.zero(), - PyFelt(self.w6, field), - PyFelt(self.w7, field), - PyFelt(self.w8, field), - PyFelt(self.w9, field), - PyFelt(self.w10, field), - PyFelt(self.w11, field), - ] - ) - - def to_bigint3(self): - pow_idx = ["0", "1", "2", "3", "4", "6", "7", "8", "9", "10", "11"] - coeffs = [getattr(self, f"w{i}") for i in pow_idx] - return [bigint_split(x, N_LIMBS, BASE) for x in coeffs] - - def hash(self, continuable_hash: int, cut=False): - x3 = self.to_bigint3() - if cut == False: - h = poseidon_hash(x3[0][0] * x3[0][1], continuable_hash) - h = poseidon_hash(x3[0][2] * x3[1][0], h) - h = poseidon_hash(x3[1][1] * x3[1][2], h) - h = poseidon_hash(x3[2][0] * x3[2][1], h) - h = poseidon_hash(x3[2][2] * x3[3][0], h) - h = poseidon_hash(x3[3][1] * x3[3][2], h) - h = poseidon_hash(x3[4][0] * x3[4][1], h) - else: - h = continuable_hash - h = poseidon_hash(x3[4][2] * x3[5][0], h) - h = poseidon_hash(x3[5][1] * x3[5][2], h) - h = poseidon_hash(x3[6][0] * x3[6][1], h) - h = poseidon_hash(x3[6][2] * x3[7][0], h) - h = poseidon_hash(x3[7][1] * x3[7][2], h) - h = poseidon_hash(x3[8][0] * x3[8][1], h) - h = poseidon_hash(x3[8][2] * x3[9][0], h) - h = poseidon_hash(x3[9][1] * x3[9][2], h) - h = poseidon_hash(x3[10][0] * x3[10][1], h) - h = poseidon_hash(x3[10][2], h) - return h - - -@dataclass -class E12: - w0: int - w1: int - w2: int - w3: int - w4: int - w5: int - w6: int - w7: int - w8: int - w9: int - w10: int - w11: int - - def __str__(self) -> str: - coeffs = [getattr(self, f"w{i}") for i in range(12)] - coeffs_gnark = w_to_gnark(coeffs) - str = "" - for i in range(12): - str += f"w{i} {np.base_repr(coeffs_gnark[i], 36)}\n" - return str - - def to_gnark(self): - coeffs = [getattr(self, f"w{i}") for i in range(12)] - coeffs_gnark = w_to_gnark(coeffs) - return coeffs_gnark - - def to_poly(self): - return Polynomial( - [ - PyFelt(self.w0, field), - PyFelt(self.w1, field), - PyFelt(self.w2, field), - PyFelt(self.w3, field), - PyFelt(self.w4, field), - PyFelt(self.w5, field), - PyFelt(self.w6, field), - PyFelt(self.w7, field), - PyFelt(self.w8, field), - PyFelt(self.w9, field), - PyFelt(self.w10, field), - PyFelt(self.w11, field), - ] - ) - - def to_bigint3(self): - coeffs = [getattr(self, f"w{i}") for i in range(12)] - - return [bigint_split(x, N_LIMBS, BASE) for x in coeffs] - - def hash(self, continuable_hash: int, cut=False): - x3 = self.to_bigint3() - if cut == 
False: - h = poseidon_hash(x3[0][0] * x3[0][1], continuable_hash) - h = poseidon_hash(x3[0][2] * x3[1][0], h) - h = poseidon_hash(x3[1][1] * x3[1][2], h) - h = poseidon_hash(x3[2][0] * x3[2][1], h) - h = poseidon_hash(x3[2][2] * x3[3][0], h) - h = poseidon_hash(x3[3][1] * x3[3][2], h) - h = poseidon_hash(x3[4][0] * x3[4][1], h) - h = poseidon_hash(x3[4][2] * x3[5][0], h) - h = poseidon_hash(x3[5][1] * x3[5][2], h) - h = poseidon_hash(x3[6][0] * x3[6][1], h) - h = poseidon_hash(x3[6][2] * x3[7][0], h) - elif cut == "w5_d2": - h = poseidon_hash(x3[6][0] * x3[6][1], continuable_hash) - h = poseidon_hash(x3[6][2] * x3[7][0], h) - elif cut == "w7_d0": - h = continuable_hash - h = poseidon_hash(x3[7][1] * x3[7][2], h) - h = poseidon_hash(x3[8][0] * x3[8][1], h) - h = poseidon_hash(x3[8][2] * x3[9][0], h) - h = poseidon_hash(x3[9][1] * x3[9][2], h) - h = poseidon_hash(x3[10][0] * x3[10][1], h) - h = poseidon_hash(x3[10][2] * x3[11][0], h) - h = poseidon_hash(x3[11][1] * x3[11][2], h) - return h - - -@dataclass -class G2Point: - x: E2 - y: E2 - - def __str__(self) -> str: - return f"X: {self.x}\nY: {self.y}" - - def __neg__(self): - return G2Point(self.x, -self.y) - - @classmethod - def compute_slope(cls, pt1, pt2) -> E2: - return (pt1.y - pt2.y) / (pt1.x - pt2.x) - - @classmethod - def line_compute(cls, pt1: "G2Point", pt2: "G2Point") -> E12_034: - C = cls.compute_slope(pt1, pt2) - l1r1 = C * pt1.x - pt1.y - return E12_034( - w1=C.a0 - 9 * C.a1, w3=l1r1.a0 - 9 * l1r1.a1, w7=C.a1, w9=l1r1.a1 - ) - - def double_step(self) -> ("G2Point", E12_034): - C = (3 * self.x * self.x) / (2 * self.y) - nx = C * C - 2 * self.x - E = C * self.x - self.y - ny = E - C * nx - line = E12_034(w1=C.a0 - 9 * C.a1, w3=E.a0 - 9 * E.a1, w7=C.a1, w9=E.a1) - - return G2Point(nx, ny), line - - def add_step(self, pt: "G2Point") -> ("G2Point", E12_034): - C = self.compute_slope(self, pt) - nx = C * C - self.x - pt.x - E = C * self.x - self.y - ny = E - C * nx - line = E12_034(w1=C.a0 - 9 * C.a1, w3=E.a0 - 9 * E.a1, w7=C.a1, w9=E.a1) - return G2Point(nx, ny), line - - def double_and_add_step(self, pt: "G2Point") -> ("G2Point", E12_034, E12_034): - lambda1 = self.compute_slope(self, pt) - x3 = lambda1 * lambda1 - self.x - pt.x - lambda2 = -lambda1 - ((2 * self.y) / (x3 - self.x)) - x4 = lambda2 * lambda2 - self.x - x3 - y4 = lambda2 * (self.x - x4) - self.y - - l1r1 = lambda1 * self.x - self.y - l2r1 = lambda2 * self.x - self.y - - l1034 = E12_034( - lambda1.a0 - 9 * lambda1.a1, l1r1.a0 - 9 * l1r1.a1, lambda1.a1, l1r1.a1 - ) - l2034 = E12_034( - lambda2.a0 - 9 * lambda2.a1, l2r1.a0 - 9 * l2r1.a1, lambda2.a1, l2r1.a1 - ) - return G2Point(x4, y4), l1034, l2034 - - -@dataclass -class G1Point: - x: int - y: int - - -def multi_miller_loop( - P_arr, - Q_arr, - n_points, - continuable_hash=int.from_bytes(b"GaragaBN254MillerLoop", "big"), -): - assert len(P_arr) == len(Q_arr) == n_points - Q_acc = Q_arr.copy() - Q_neg = [Q_arr[i].__neg__() for i in range(n_points)] - yInv = [pow(P_arr[i].y, -1, p) for i in range(n_points)] - xOverY = [-P_arr[i].x * yInv[i] % p for i in range(n_points)] - - # // Compute ∏ᵢ { fᵢ_{6x₀+2,Q}(P) } - # // i = 64, separately to avoid an E12 Square - # // (Square(res) = 1² = 1) - - # // k = 0, separately to avoid MulBy034 (res × ℓ) - # // (assign line to res) - # print("Q_acc[0]", Q_acc[0]) - - new_Q0, l1 = Q_acc[0].double_step() - Q_acc[0] = new_Q0 - # print("Q_acc[0]", Q_acc[0]) - res_init = E12_034( - xOverY[0] * l1.w1 % p, - yInv[0] * l1.w3 % p, - xOverY[0] * l1.w7 % p, - yInv[0] * l1.w9 % p, - ) - - if 
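# Editor's sketch (Python; not part of the deleted file): the affine chord /
# tangent formulas used by double_step and add_step above, checked on a toy
# curve y^2 = x^3 + 1 over F_23 (the real code runs the same algebra over the
# quadratic extension E2 for BN254's G2).
q = 23

def toy_double_step(x, y):
    c = 3 * x * x * pow(2 * y % q, -1, q) % q      # tangent slope
    nx = (c * c - 2 * x) % q
    e = (c * x - y) % q                            # line is Y = c*X - e
    ny = (e - c * nx) % q
    return nx, ny

def toy_add_step(x1, y1, x2, y2):
    c = (y1 - y2) * pow((x1 - x2) % q, -1, q) % q  # chord slope
    nx = (c * c - x1 - x2) % q
    e = (c * x1 - y1) % q
    ny = (e - c * nx) % q
    return nx, ny

dbl = toy_double_step(2, 3)                        # (2, 3) lies on the curve
trpl = toy_add_step(*dbl, 2, 3)
assert dbl == (0, 1) and trpl == (22, 0)
assert all(y * y % q == (x**3 + 1) % q for x, y in (dbl, trpl))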
n_points >= 2: - new_Q1, l1 = Q_acc[1].double_step() - Q_acc[1] = new_Q1 - l1f = E12_034( - xOverY[1] * l1.w1 % p, - yInv[1] * l1.w3 % p, - xOverY[1] * l1.w7 % p, - yInv[1] * l1.w9 % p, - ) - res_t01234, continuable_hash = mul034_034_trick(l1f, res_init, continuable_hash) - res = res_t01234.to_E12() - else: - res = res_init.to_E12() - # print("resInit", res.to_gnark()) - # print("hashInit", continuable_hash) - if n_points >= 3: - new_Q2, l1 = Q_acc[2].double_step() - Q_acc[2] = new_Q2 - l1f = E12_034( - xOverY[2] * l1.w1 % p, - yInv[2] * l1.w3 % p, - xOverY[2] * l1.w7 % p, - yInv[2] * l1.w9 % p, - ) - res, continuable_hash = mul034_trick(res, l1f, continuable_hash) - # n > 3 - for k in range(n_points - 1, 2, -1): - new_Q, l1 = Q_acc[k].double_step() - Q_acc[k] = new_Q - l1f = E12_034( - xOverY[k] * l1.w1 % p, - yInv[k] * l1.w3 % p, - xOverY[k] * l1.w7 % p, - yInv[k] * l1.w9 % p, - ) - res, continuable_hash = mul034_trick(res, l1f, continuable_hash) - # i63 - res, continuable_hash = square_trick(res, continuable_hash) - # print("HASH63", continuable_hash) - for k in range(0, n_points): - l2 = G2Point.line_compute(Q_acc[k], Q_neg[k]) - l2f = E12_034( - xOverY[k] * l2.w1 % p, - yInv[k] * l2.w3 % p, - xOverY[k] * l2.w7 % p, - yInv[k] * l2.w9 % p, - ) - (new_Q, l1) = Q_acc[k].add_step(Q_arr[k]) - Q_acc[k] = new_Q - l1f = E12_034( - xOverY[k] * l1.w1 % p, - yInv[k] * l1.w3 % p, - xOverY[k] * l1.w7 % p, - yInv[k] * l1.w9 % p, - ) - prod_lines, continuable_hash = mul034_034_trick(l1f, l2f, continuable_hash) - # print("HASH034034_63", continuable_hash) - res, continuable_hash = mul01234_trick(res, prod_lines, continuable_hash) - # print("HASH01234_63", continuable_hash) - lines = n_points * [None] - # print("resBefMulti", res.to_gnark()) - # print("hashBefMulti", continuable_hash) - - for i in range(62, -1, -1): - res, continuable_hash = square_trick(res, continuable_hash) - bit = BITS[i] - if bit == 0: - for k in range(0, n_points): - new_Q, l1 = Q_acc[k].double_step() - Q_acc[k] = new_Q - l1f = E12_034( - xOverY[k] * l1.w1 % p, - yInv[k] * l1.w3 % p, - xOverY[k] * l1.w7 % p, - yInv[k] * l1.w9 % p, - ) - lines[k] = l1f - if n_points % 2 != 0: - res, continuable_hash = mul034_trick( - res, lines[n_points - 1], continuable_hash - ) - for k in range(1, n_points, 2): - prod_lines, continuable_hash = mul034_034_trick( - lines[k], lines[k - 1], continuable_hash - ) - res, continuable_hash = mul01234_trick( - res, prod_lines, continuable_hash - ) - elif bit == 1: - for k in range(0, n_points): - new_Q, l1, l2 = Q_acc[k].double_and_add_step(Q_arr[k]) - Q_acc[k] = new_Q - l1f = E12_034( - xOverY[k] * l1.w1 % p, - yInv[k] * l1.w3 % p, - xOverY[k] * l1.w7 % p, - yInv[k] * l1.w9 % p, - ) - l2f = E12_034( - xOverY[k] * l2.w1 % p, - yInv[k] * l2.w3 % p, - xOverY[k] * l2.w7 % p, - yInv[k] * l2.w9 % p, - ) - prod_lines, continuable_hash = mul034_034_trick( - l1f, l2f, continuable_hash - ) - res, continuable_hash = mul01234_trick( - res, prod_lines, continuable_hash - ) - elif bit == -1: - for k in range(0, n_points): - new_Q, l1, l2 = Q_acc[k].double_and_add_step(Q_neg[k]) - Q_acc[k] = new_Q - l1f = E12_034( - xOverY[k] * l1.w1 % p, - yInv[k] * l1.w3 % p, - xOverY[k] * l1.w7 % p, - yInv[k] * l1.w9 % p, - ) - l2f = E12_034( - xOverY[k] * l2.w1 % p, - yInv[k] * l2.w3 % p, - xOverY[k] * l2.w7 % p, - yInv[k] * l2.w9 % p, - ) - prod_lines, continuable_hash = mul034_034_trick( - l1f, l2f, continuable_hash - ) - res, continuable_hash = mul01234_trick( - res, prod_lines, continuable_hash - ) - # print("resBefFinalLoop", 
res.to_gnark()) - for k in range(0, n_points): - q1x = Q_arr[k].x.conjugate() - q1y = Q_arr[k].y.conjugate() - q1x = mul_by_non_residue_k_pow_j(q1x, 1, 2) - q1y = mul_by_non_residue_k_pow_j(q1y, 1, 3) - - q2x = mul_by_non_residue_k_pow_j(Q_arr[k].x, 2, 2) - q2y = mul_by_non_residue_k_pow_j(Q_arr[k].y, 2, 3) - q2y = -q2y - - Q1 = G2Point(q1x, q1y) - # print("Q1\n", Q1) - Q2 = G2Point(q2x, q2y) - # print("Q2\n", Q2) - - Q_acc[k], l1 = Q_acc[k].add_step(Q1) - l1f = E12_034( - xOverY[k] * l1.w1 % p, - yInv[k] * l1.w3 % p, - xOverY[k] * l1.w7 % p, - yInv[k] * l1.w9 % p, - ) - - l2 = G2Point.line_compute(Q_acc[k], G2Point(q2x, q2y)) - l2f = E12_034( - xOverY[k] * l2.w1 % p, - yInv[k] * l2.w3 % p, - xOverY[k] * l2.w7 % p, - yInv[k] * l2.w9 % p, - ) - - prod_lines, continuable_hash = mul034_034_trick(l1f, l2f, continuable_hash) - res, continuable_hash = mul01234_trick(res, prod_lines, continuable_hash) - - return res, continuable_hash - - -def mul034_034_trick(x: E12_034, y: E12_034, continuable_hash: int) -> (E12_01234, int): - z_poly = x.to_poly() * y.to_poly() - z_polyr = z_poly % IRREDUCIBLE_POLY_12 - z_polyq = z_poly // IRREDUCIBLE_POLY_12 - z_polyr_coeffs = z_polyr.get_coeffs() - z_polyq_coeffs = z_polyq.get_coeffs() - assert len(z_polyq_coeffs) <= 7 - z_polyr_coeffs = z_polyr_coeffs + [0] * (12 - len(z_polyr_coeffs)) - r_w5 = z_polyr_coeffs.pop(5) - assert r_w5 == 0, f"Not a 01234, w5={r_w5}" - z_polyq_coeffs = z_polyq_coeffs + [0] * (7 - len(z_polyq_coeffs)) - - q2 = [split_128(x) for x in z_polyq_coeffs] - R = E12_01234(*z_polyr_coeffs) - r3 = R.to_bigint3() - - h = x.hash(continuable_hash=continuable_hash) - h = y.hash(continuable_hash=h) - h = poseidon_hash(q2[0][0] * r3[0][0], h) - h = poseidon_hash(q2[0][1] * r3[0][1], h) - h = poseidon_hash(q2[1][0] * r3[0][2], h) - h = poseidon_hash(q2[1][1] * r3[1][0], h) - h = poseidon_hash(q2[2][0] * r3[1][1], h) - h = poseidon_hash(q2[2][1] * r3[1][2], h) - h = poseidon_hash(q2[3][0] * r3[2][0], h) - h = poseidon_hash(q2[3][1] * r3[2][1], h) - h = poseidon_hash(q2[4][0] * r3[2][2], h) - h = poseidon_hash(q2[4][1] * r3[3][0], h) - h = poseidon_hash(q2[5][0] * r3[3][1], h) - h = poseidon_hash(q2[5][1] * r3[3][2], h) - h = poseidon_hash(q2[6][0] * r3[4][0], h) - h = poseidon_hash(q2[6][1] * r3[4][1], h) - h = R.hash(continuable_hash=h, cut=True) - - return R, h - - -def mul034_trick(x: E12, y: E12_034, continuable_hash: int) -> (E12, int): - z_poly = x.to_poly() * y.to_poly() - z_polyr = z_poly % IRREDUCIBLE_POLY_12 - z_polyq = z_poly // IRREDUCIBLE_POLY_12 - z_polyr_coeffs = z_polyr.get_coeffs() - z_polyq_coeffs = z_polyq.get_coeffs() - assert len(z_polyq_coeffs) <= 9 - z_polyr_coeffs = z_polyr_coeffs + [0] * (12 - len(z_polyr_coeffs)) - z_polyq_coeffs = z_polyq_coeffs + [0] * (9 - len(z_polyq_coeffs)) - q2 = [split_128(x) for x in z_polyq_coeffs] - R = E12(*z_polyr_coeffs) - r3 = R.to_bigint3() - h = x.hash(continuable_hash=continuable_hash) - h = y.hash(continuable_hash=h) - h = poseidon_hash(q2[0][0] * r3[0][0], h) - h = poseidon_hash(q2[0][1] * r3[0][1], h) - h = poseidon_hash(q2[1][0] * r3[0][2], h) - h = poseidon_hash(q2[1][1] * r3[1][0], h) - h = poseidon_hash(q2[2][0] * r3[1][1], h) - h = poseidon_hash(q2[2][1] * r3[1][2], h) - h = poseidon_hash(q2[3][0] * r3[2][0], h) - h = poseidon_hash(q2[3][1] * r3[2][1], h) - h = poseidon_hash(q2[4][0] * r3[2][2], h) - h = poseidon_hash(q2[4][1] * r3[3][0], h) - h = poseidon_hash(q2[5][0] * r3[3][1], h) - h = poseidon_hash(q2[5][1] * r3[3][2], h) - h = poseidon_hash(q2[6][0] * r3[4][0], h) - h = 
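# Editor's sketch (Python; not part of the deleted file): why mul034_034_trick
# can assert w5 == 0 on the remainder. Two sparse line evaluations
# 1 + a*w + b*w^3 + c*w^7 + d*w^9 are multiplied and reduced modulo the direct
# degree-12 modulus, assumed here to be w^12 - 18*w^6 + 82 (the analogue of the
# E6 unreducible_poly above); no degree-5 term can appear, so the result fits
# the 01234 shape.
import random

p = 0x30644E72E131A029B85045B68181585D97816A916871CA8D3C208C16D87CFD47
MOD = [82, 0, 0, 0, 0, 0, -18 % p, 0, 0, 0, 0, 0, 1]   # w^12 - 18*w^6 + 82

def poly_mulmod(a, b):
    res = [0] * (len(a) + len(b) - 1)
    for i, ai in enumerate(a):
        for j, bj in enumerate(b):
            res[i + j] = (res[i + j] + ai * bj) % p
    while len(res) > 12:                    # reduce every term of degree >= 12
        lead = res.pop()
        d = len(res) - 12
        for k, mk in enumerate(MOD[:-1]):
            res[d + k] = (res[d + k] - lead * mk) % p
    return res

def random_line_034():
    a, b, c, d = (random.randrange(p) for _ in range(4))
    return [1, a, 0, b, 0, 0, 0, c, 0, d, 0, 0]

prod = poly_mulmod(random_line_034(), random_line_034())
assert prod[5] == 0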
poseidon_hash(q2[6][1] * r3[4][1], h) - h = poseidon_hash(q2[7][0] * r3[4][2], h) - h = poseidon_hash(q2[7][1] * r3[5][0], h) - h = poseidon_hash(q2[8][0] * r3[5][1], h) - h = poseidon_hash(q2[8][1] * r3[5][2], h) - - h = R.hash(continuable_hash=h, cut="w5_d2") - - return R, h - - -def square_trick(x: E12, continuable_hash: int) -> (E12, int): - x_poly = x.to_poly() - z_poly = x_poly * x_poly - z_polyr = z_poly % IRREDUCIBLE_POLY_12 - z_polyq = z_poly // IRREDUCIBLE_POLY_12 - z_polyr_coeffs = z_polyr.get_coeffs() - z_polyq_coeffs = z_polyq.get_coeffs() - assert len(z_polyq_coeffs) <= 11 - z_polyr_coeffs = z_polyr_coeffs + [0] * (12 - len(z_polyr_coeffs)) - z_polyq_coeffs = z_polyq_coeffs + [0] * (11 - len(z_polyq_coeffs)) - q2 = [split_128(x) for x in z_polyq_coeffs] - R = E12(*z_polyr_coeffs) - r3 = R.to_bigint3() - h = x.hash(continuable_hash=continuable_hash) - - h = poseidon_hash(q2[0][0] * r3[0][0], h) - h = poseidon_hash(q2[0][1] * r3[0][1], h) - h = poseidon_hash(q2[1][0] * r3[0][2], h) - h = poseidon_hash(q2[1][1] * r3[1][0], h) - h = poseidon_hash(q2[2][0] * r3[1][1], h) - h = poseidon_hash(q2[2][1] * r3[1][2], h) - h = poseidon_hash(q2[3][0] * r3[2][0], h) - h = poseidon_hash(q2[3][1] * r3[2][1], h) - h = poseidon_hash(q2[4][0] * r3[2][2], h) - h = poseidon_hash(q2[4][1] * r3[3][0], h) - h = poseidon_hash(q2[5][0] * r3[3][1], h) - h = poseidon_hash(q2[5][1] * r3[3][2], h) - h = poseidon_hash(q2[6][0] * r3[4][0], h) - h = poseidon_hash(q2[6][1] * r3[4][1], h) - h = poseidon_hash(q2[7][0] * r3[4][2], h) - h = poseidon_hash(q2[7][1] * r3[5][0], h) - h = poseidon_hash(q2[8][0] * r3[5][1], h) - h = poseidon_hash(q2[8][1] * r3[5][2], h) - h = poseidon_hash(q2[9][0] * r3[6][0], h) - h = poseidon_hash(q2[9][1] * r3[6][1], h) - h = poseidon_hash(q2[10][0] * r3[6][2], h) - h = poseidon_hash(q2[10][1] * r3[7][0], h) - - h = R.hash(continuable_hash=h, cut="w7_d0") - - return R, h - - -def mul01234_trick(x: E12, y: E12_01234, continuable_hash: int) -> (E12, int): - z_poly = x.to_poly() * y.to_poly() - z_polyr = z_poly % IRREDUCIBLE_POLY_12 - z_polyq = z_poly // IRREDUCIBLE_POLY_12 - z_polyr_coeffs = z_polyr.get_coeffs() - z_polyq_coeffs = z_polyq.get_coeffs() - assert len(z_polyq_coeffs) <= 11 - z_polyr_coeffs = z_polyr_coeffs + [0] * (12 - len(z_polyr_coeffs)) - z_polyq_coeffs = z_polyq_coeffs + [0] * (11 - len(z_polyq_coeffs)) - q2 = [split_128(x) for x in z_polyq_coeffs] - R = E12(*z_polyr_coeffs) - r3 = R.to_bigint3() - h = x.hash(continuable_hash=continuable_hash) - h = y.hash(continuable_hash=h) - h = poseidon_hash(q2[0][0] * r3[0][0], h) - h = poseidon_hash(q2[0][1] * r3[0][1], h) - h = poseidon_hash(q2[1][0] * r3[0][2], h) - h = poseidon_hash(q2[1][1] * r3[1][0], h) - h = poseidon_hash(q2[2][0] * r3[1][1], h) - h = poseidon_hash(q2[2][1] * r3[1][2], h) - h = poseidon_hash(q2[3][0] * r3[2][0], h) - h = poseidon_hash(q2[3][1] * r3[2][1], h) - h = poseidon_hash(q2[4][0] * r3[2][2], h) - h = poseidon_hash(q2[4][1] * r3[3][0], h) - h = poseidon_hash(q2[5][0] * r3[3][1], h) - h = poseidon_hash(q2[5][1] * r3[3][2], h) - h = poseidon_hash(q2[6][0] * r3[4][0], h) - h = poseidon_hash(q2[6][1] * r3[4][1], h) - h = poseidon_hash(q2[7][0] * r3[4][2], h) - h = poseidon_hash(q2[7][1] * r3[5][0], h) - h = poseidon_hash(q2[8][0] * r3[5][1], h) - h = poseidon_hash(q2[8][1] * r3[5][2], h) - h = poseidon_hash(q2[9][0] * r3[6][0], h) - h = poseidon_hash(q2[9][1] * r3[6][1], h) - h = poseidon_hash(q2[10][0] * r3[6][2], h) - h = poseidon_hash(q2[10][1] * r3[7][0], h) - h = R.hash(continuable_hash=h, 
cut="w7_d0") - - return R, h - - -if __name__ == "__main__": - G1 = G1Point(1, 2) - G2 = G2Point( - E2( - 10857046999023057135944570762232829481370756359578518086990519993285655852781, - 11559732032986387107991004021392285783925812861821192530917403151452391805634, - p, - ), - E2( - 8495653923123431417604973247489272438418190587263600148770280649306958101930, - 4082367875863433681332203403145435568316851327593401208105741076214120093531, - p, - ), - ) - - # m, h = multi_miller_loop([G1], [G2], 1) - - # print(m) - # print(h) - - P = [ - G1Point( - x=6424909707529041010431833767196069900905951186152453452535233785859310247091, - y=6731815178901292517144522853524599129026091167900349143473443684504480249601, - ), - G1Point( - x=10154908101955836410822568671359241381386977712614837423695806888806827087120, - y=6694119430739204200412415739739878806463649419827004538677705571548899835345, - ), - G1Point( - x=11664089827190113040588903049366218671264446383108882453852976389666897952784, - y=11964654005374721149828827734828350582389212487311147825692987599394401865041, - ), - ] - Q = [ - G2Point( - x=E2( - a0=9377668754004040279698406674069547206576290350544684455848413744271894321832, - a1=11702600617119966915217386854353771222477427862839239072991366294351362953119, - p=p, - ), - y=E2( - a0=2339250257289832665920974862775225721388286867501651664202401324220401621360, - a1=3628891305020420995628487021870577687557167953941662416598489001684202886401, - p=p, - ), - ), - G2Point( - x=E2( - a0=10857046999023057135944570762232829481370756359578518086990519993285655852781, - a1=11559732032986387107991004021392285783925812861821192530917403151452391805634, - p=p, - ), - y=E2( - a0=8495653923123431417604973247489272438418190587263600148770280649306958101930, - a1=4082367875863433681332203403145435568316851327593401208105741076214120093531, - p=p, - ), - ), - G2Point( - x=E2( - a0=7912208710313447447762395792098481825752520616755888860068004689933335666613, - a1=12599857379517512478445603412764121041984228075771497593287716170335433683702, - p=p, - ), - y=E2( - a0=21679208693936337484429571887537508926366191105267550375038502782696042114705, - a1=11502426145685875357967720478366491326865907869902181704031346886834786027007, - p=p, - ), - ), - ] - - m, h = multi_miller_loop(P, Q, 3) - print(m) - print(h) diff --git a/archive_tmp/bn254/towers/e12.cairo b/archive_tmp/bn254/towers/e12.cairo deleted file mode 100644 index 3eafe72a..00000000 --- a/archive_tmp/bn254/towers/e12.cairo +++ /dev/null @@ -1,4048 +0,0 @@ -from src.bn254.towers.e6 import e6, E6, E6full -from src.bn254.towers.e2 import e2, E2 -from src.bn254.fq import ( - fq_bigint3, - BigInt3, - felt_to_bigint3, - UnreducedBigInt5, - UnreducedBigInt3, - bigint_sqr, - bigint_mul, - verify_zero5, - reduce_5, - reduce_3, - BASE_MIN_1, - unrededucedUint256_to_BigInt3, - assert_reduced_felt, -) -from starkware.cairo.common.uint256 import Uint256 -from starkware.cairo.common.alloc import alloc -from starkware.cairo.common.registers import get_fp_and_pc -from src.bn254.curve import N_LIMBS, DEGREE, BASE, P0, P1, P2, NON_RESIDUE_E2_a0, NON_RESIDUE_E2_a1 -from starkware.cairo.common.builtin_poseidon.poseidon import poseidon_hash, poseidon_hash_many -from starkware.cairo.common.cairo_builtins import PoseidonBuiltin, BitwiseBuiltin -from starkware.cairo.common.poseidon_state import PoseidonBuiltinState - -struct E12 { - c0: E6*, - c1: E6*, -} -struct E12D { - w0: BigInt3, - w1: BigInt3, - w2: BigInt3, - w3: BigInt3, - w4: BigInt3, - w5: BigInt3, - w6: BigInt3, - 
w7: BigInt3, - w8: BigInt3, - w9: BigInt3, - w10: BigInt3, - w11: BigInt3, -} -struct E12DU { - w0: UnreducedBigInt3, - w1: UnreducedBigInt3, - w2: UnreducedBigInt3, - w3: UnreducedBigInt3, - w4: UnreducedBigInt3, - w5: UnreducedBigInt3, - w6: UnreducedBigInt3, - w7: UnreducedBigInt3, - w8: UnreducedBigInt3, - w9: UnreducedBigInt3, - w10: UnreducedBigInt3, - w11: UnreducedBigInt3, -} - -struct E11DU { - w0: Uint256, - w1: Uint256, - w2: Uint256, - w3: Uint256, - w4: Uint256, - w5: Uint256, - w6: Uint256, - w7: Uint256, - w8: Uint256, - w9: Uint256, - w10: Uint256, -} - -struct E11DU3 { - w0: BigInt3, - w1: BigInt3, - w2: BigInt3, - w3: BigInt3, - w4: BigInt3, - w5: BigInt3, - w6: BigInt3, - w7: BigInt3, - w8: BigInt3, - w9: BigInt3, - w10: BigInt3, -} - -struct E9full { - w0: Uint256, - w1: Uint256, - w2: Uint256, - w3: Uint256, - w4: Uint256, - w5: Uint256, - w6: Uint256, - w7: Uint256, - w8: Uint256, -} - -struct E7full { - w0: Uint256, - w1: Uint256, - w2: Uint256, - w3: Uint256, - w4: Uint256, - w5: Uint256, - w6: Uint256, -} - -// 034 Gnark element converted to Fp12/Fp representation -struct E12full034 { - w1: BigInt3, - w3: BigInt3, - w7: BigInt3, - w9: BigInt3, -} - -struct E12full01234 { - w0: BigInt3, - w1: BigInt3, - w2: BigInt3, - w3: BigInt3, - w4: BigInt3, - w6: BigInt3, - w7: BigInt3, - w8: BigInt3, - w9: BigInt3, - w10: BigInt3, - w11: BigInt3, -} - -struct ZPowers11 { - z_1: BigInt3, - z_2: BigInt3, - z_3: BigInt3, - z_4: BigInt3, - z_5: BigInt3, - z_6: BigInt3, - z_7: BigInt3, - z_8: BigInt3, - z_9: BigInt3, - z_10: BigInt3, - z_11: BigInt3, -} - -struct PolyAcc12 { - xy: UnreducedBigInt3, - q: E11DU, - r: E12DU, -} - -struct PolyAcc034 { - xy: UnreducedBigInt3, - q: E9full, - r: E12DU, -} - -struct PolyAcc034034 { - xy: UnreducedBigInt3, - q: E7full, - r: E12full01234, -} - -namespace e12_tricks { - func square{ - range_check_ptr, - bitwise_ptr: BitwiseBuiltin*, - poseidon_ptr: PoseidonBuiltin*, - z_pow1_11_ptr: ZPowers11*, - continuable_hash: felt, - poly_acc_12: PolyAcc12*, - }(x_ptr: E12D*) -> E12D* { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - local x: E12D = [x_ptr]; - local z_pow1_11: ZPowers11 = [z_pow1_11_ptr]; - local r_w: E12D; - local q_w: E11DU; - %{ - from tools.py.polynomial import Polynomial - from tools.py.field import BaseFieldElement - from src.hints.fq import pack_e12d, fill_e12d, fill_uint256 - from starkware.cairo.common.cairo_secp.secp_utils import split - from tools.make.utils import split_128 - from src.bn254.curve import IRREDUCIBLE_POLY_12, field - - x=pack_e12d(ids.x, ids.N_LIMBS, ids.BASE) - x_poly=Polynomial([BaseFieldElement(x[i], field) for i in range(12)]) - z_poly=x_poly*x_poly - - z_polyr=z_poly % IRREDUCIBLE_POLY_12 - z_polyq=z_poly // IRREDUCIBLE_POLY_12 - z_polyr_coeffs = z_polyr.get_coeffs() - z_polyq_coeffs = z_polyq.get_coeffs() - assert len(z_polyq_coeffs)<=11 - # extend z_polyq with 0 to make it len 11: - z_polyq_coeffs = z_polyq_coeffs + (11-len(z_polyq_coeffs))*[0] - # extend z_polyr with 0 to make it len 12: - z_polyr_coeffs = z_polyr_coeffs + (12-len(z_polyr_coeffs))*[0] - - for i in range(11): - fill_uint256(z_polyq_coeffs[i], getattr(ids.q_w, f'w{i}')) - - fill_e12d(z_polyr_coeffs, ids.r_w, ids.N_LIMBS, ids.BASE) - %} - assert [range_check_ptr + 0] = r_w.w0.d0; - assert [range_check_ptr + 1] = r_w.w0.d1; - assert [range_check_ptr + 2] = r_w.w0.d2; - assert [range_check_ptr + 3] = r_w.w1.d0; - assert [range_check_ptr + 4] = r_w.w1.d1; - assert [range_check_ptr + 5] = r_w.w1.d2; - assert [range_check_ptr + 6] = 
r_w.w2.d0; - assert [range_check_ptr + 7] = r_w.w2.d1; - assert [range_check_ptr + 8] = r_w.w2.d2; - assert [range_check_ptr + 9] = r_w.w3.d0; - assert [range_check_ptr + 10] = r_w.w3.d1; - assert [range_check_ptr + 11] = r_w.w3.d2; - assert [range_check_ptr + 12] = r_w.w4.d0; - assert [range_check_ptr + 13] = r_w.w4.d1; - assert [range_check_ptr + 14] = r_w.w4.d2; - assert [range_check_ptr + 15] = r_w.w5.d0; - assert [range_check_ptr + 16] = r_w.w5.d1; - assert [range_check_ptr + 17] = r_w.w5.d2; - assert [range_check_ptr + 18] = r_w.w6.d0; - assert [range_check_ptr + 19] = r_w.w6.d1; - assert [range_check_ptr + 20] = r_w.w6.d2; - assert [range_check_ptr + 21] = r_w.w7.d0; - assert [range_check_ptr + 22] = r_w.w7.d1; - assert [range_check_ptr + 23] = r_w.w7.d2; - assert [range_check_ptr + 24] = r_w.w8.d0; - assert [range_check_ptr + 25] = r_w.w8.d1; - assert [range_check_ptr + 26] = r_w.w8.d2; - assert [range_check_ptr + 27] = r_w.w9.d0; - assert [range_check_ptr + 28] = r_w.w9.d1; - assert [range_check_ptr + 29] = r_w.w9.d2; - assert [range_check_ptr + 30] = r_w.w10.d0; - assert [range_check_ptr + 31] = r_w.w10.d1; - assert [range_check_ptr + 32] = r_w.w10.d2; - assert [range_check_ptr + 33] = r_w.w11.d0; - assert [range_check_ptr + 34] = r_w.w11.d1; - assert [range_check_ptr + 35] = r_w.w11.d2; - assert [range_check_ptr + 36] = q_w.w0.low; - assert [range_check_ptr + 37] = q_w.w0.high; - assert [range_check_ptr + 38] = q_w.w1.low; - assert [range_check_ptr + 39] = q_w.w1.high; - assert [range_check_ptr + 40] = q_w.w2.low; - assert [range_check_ptr + 41] = q_w.w2.high; - assert [range_check_ptr + 42] = q_w.w3.low; - assert [range_check_ptr + 43] = q_w.w3.high; - assert [range_check_ptr + 44] = q_w.w4.low; - assert [range_check_ptr + 45] = q_w.w4.high; - assert [range_check_ptr + 46] = q_w.w5.low; - assert [range_check_ptr + 47] = q_w.w5.high; - assert [range_check_ptr + 48] = q_w.w6.low; - assert [range_check_ptr + 49] = q_w.w6.high; - assert [range_check_ptr + 50] = q_w.w7.low; - assert [range_check_ptr + 51] = q_w.w7.high; - assert [range_check_ptr + 52] = q_w.w8.low; - assert [range_check_ptr + 53] = q_w.w8.high; - assert [range_check_ptr + 54] = q_w.w9.low; - assert [range_check_ptr + 55] = q_w.w9.high; - assert [range_check_ptr + 56] = q_w.w10.low; - assert [range_check_ptr + 57] = q_w.w10.high; - assert [range_check_ptr + 58] = 12 * 3 * BASE_MIN_1 - ( - r_w.w0.d0 + - r_w.w0.d1 + - r_w.w0.d2 + - r_w.w1.d0 + - r_w.w1.d1 + - r_w.w1.d2 + - r_w.w2.d0 + - r_w.w2.d1 + - r_w.w2.d2 + - r_w.w3.d0 + - r_w.w3.d1 + - r_w.w3.d2 + - r_w.w4.d0 + - r_w.w4.d1 + - r_w.w4.d2 + - r_w.w5.d0 + - r_w.w5.d1 + - r_w.w5.d2 + - r_w.w6.d0 + - r_w.w6.d1 + - r_w.w6.d2 + - r_w.w7.d0 + - r_w.w7.d1 + - r_w.w7.d2 + - r_w.w8.d0 + - r_w.w8.d1 + - r_w.w8.d2 + - r_w.w9.d0 + - r_w.w9.d1 + - r_w.w9.d2 + - r_w.w10.d0 + - r_w.w10.d1 + - r_w.w10.d2 + - r_w.w11.d0 + - r_w.w11.d1 + - r_w.w11.d2 - ); - - tempvar range_check_ptr = range_check_ptr + 59; - - tempvar two = 2; - assert poseidon_ptr.input = PoseidonBuiltinState( - s0=x.w0.d0 * x.w0.d1, s1=continuable_hash, s2=two - ); - assert poseidon_ptr[1].input = PoseidonBuiltinState( - s0=x.w0.d2 * x.w1.d0, s1=poseidon_ptr[0].output.s0, s2=two - ); - assert poseidon_ptr[2].input = PoseidonBuiltinState( - s0=x.w1.d1 * x.w1.d2, s1=poseidon_ptr[1].output.s0, s2=two - ); - assert poseidon_ptr[3].input = PoseidonBuiltinState( - s0=x.w2.d0 * x.w2.d1, s1=poseidon_ptr[2].output.s0, s2=two - ); - assert poseidon_ptr[4].input = PoseidonBuiltinState( - s0=x.w2.d2 * x.w3.d0, 
s1=poseidon_ptr[3].output.s0, s2=two - ); - assert poseidon_ptr[5].input = PoseidonBuiltinState( - s0=x.w3.d1 * x.w3.d2, s1=poseidon_ptr[4].output.s0, s2=two - ); - assert poseidon_ptr[6].input = PoseidonBuiltinState( - s0=x.w4.d0 * x.w4.d1, s1=poseidon_ptr[5].output.s0, s2=two - ); - assert poseidon_ptr[7].input = PoseidonBuiltinState( - s0=x.w4.d2 * x.w5.d0, s1=poseidon_ptr[6].output.s0, s2=two - ); - assert poseidon_ptr[8].input = PoseidonBuiltinState( - s0=x.w5.d1 * x.w5.d2, s1=poseidon_ptr[7].output.s0, s2=two - ); - assert poseidon_ptr[9].input = PoseidonBuiltinState( - s0=x.w6.d0 * x.w6.d1, s1=poseidon_ptr[8].output.s0, s2=two - ); - assert poseidon_ptr[10].input = PoseidonBuiltinState( - s0=x.w6.d2 * x.w7.d0, s1=poseidon_ptr[9].output.s0, s2=two - ); - assert poseidon_ptr[11].input = PoseidonBuiltinState( - s0=x.w7.d1 * x.w7.d2, s1=poseidon_ptr[10].output.s0, s2=two - ); - assert poseidon_ptr[12].input = PoseidonBuiltinState( - s0=x.w8.d0 * x.w8.d1, s1=poseidon_ptr[11].output.s0, s2=two - ); - assert poseidon_ptr[13].input = PoseidonBuiltinState( - s0=x.w8.d2 * x.w9.d0, s1=poseidon_ptr[12].output.s0, s2=two - ); - assert poseidon_ptr[14].input = PoseidonBuiltinState( - s0=x.w9.d1 * x.w9.d2, s1=poseidon_ptr[13].output.s0, s2=two - ); - assert poseidon_ptr[15].input = PoseidonBuiltinState( - s0=x.w10.d0 * x.w10.d1, s1=poseidon_ptr[14].output.s0, s2=two - ); - assert poseidon_ptr[16].input = PoseidonBuiltinState( - s0=x.w10.d2 * x.w11.d0, s1=poseidon_ptr[15].output.s0, s2=two - ); - assert poseidon_ptr[17].input = PoseidonBuiltinState( - s0=x.w11.d1 * x.w11.d2, s1=poseidon_ptr[16].output.s0, s2=two - ); - assert poseidon_ptr[18].input = PoseidonBuiltinState( - s0=q_w.w0.low * r_w.w0.d0, s1=poseidon_ptr[17].output.s0, s2=two - ); - assert poseidon_ptr[19].input = PoseidonBuiltinState( - s0=q_w.w0.high * r_w.w0.d1, s1=poseidon_ptr[18].output.s0, s2=two - ); - assert poseidon_ptr[20].input = PoseidonBuiltinState( - s0=q_w.w1.low * r_w.w0.d2, s1=poseidon_ptr[19].output.s0, s2=two - ); - assert poseidon_ptr[21].input = PoseidonBuiltinState( - s0=q_w.w1.high * r_w.w1.d0, s1=poseidon_ptr[20].output.s0, s2=two - ); - assert poseidon_ptr[22].input = PoseidonBuiltinState( - s0=q_w.w2.low * r_w.w1.d1, s1=poseidon_ptr[21].output.s0, s2=two - ); - assert poseidon_ptr[23].input = PoseidonBuiltinState( - s0=q_w.w2.high * r_w.w1.d2, s1=poseidon_ptr[22].output.s0, s2=two - ); - assert poseidon_ptr[24].input = PoseidonBuiltinState( - s0=q_w.w3.low * r_w.w2.d0, s1=poseidon_ptr[23].output.s0, s2=two - ); - assert poseidon_ptr[25].input = PoseidonBuiltinState( - s0=q_w.w3.high * r_w.w2.d1, s1=poseidon_ptr[24].output.s0, s2=two - ); - assert poseidon_ptr[26].input = PoseidonBuiltinState( - s0=q_w.w4.low * r_w.w2.d2, s1=poseidon_ptr[25].output.s0, s2=two - ); - assert poseidon_ptr[27].input = PoseidonBuiltinState( - s0=q_w.w4.high * r_w.w3.d0, s1=poseidon_ptr[26].output.s0, s2=two - ); - assert poseidon_ptr[28].input = PoseidonBuiltinState( - s0=q_w.w5.low * r_w.w3.d1, s1=poseidon_ptr[27].output.s0, s2=two - ); - assert poseidon_ptr[29].input = PoseidonBuiltinState( - s0=q_w.w5.high * r_w.w3.d2, s1=poseidon_ptr[28].output.s0, s2=two - ); - assert poseidon_ptr[30].input = PoseidonBuiltinState( - s0=q_w.w6.low * r_w.w4.d0, s1=poseidon_ptr[29].output.s0, s2=two - ); - assert poseidon_ptr[31].input = PoseidonBuiltinState( - s0=q_w.w6.high * r_w.w4.d1, s1=poseidon_ptr[30].output.s0, s2=two - ); - assert poseidon_ptr[32].input = PoseidonBuiltinState( - s0=q_w.w7.low * r_w.w4.d2, s1=poseidon_ptr[31].output.s0, 
s2=two - ); - assert poseidon_ptr[33].input = PoseidonBuiltinState( - s0=q_w.w7.high * r_w.w5.d0, s1=poseidon_ptr[32].output.s0, s2=two - ); - assert poseidon_ptr[34].input = PoseidonBuiltinState( - s0=q_w.w8.low * r_w.w5.d1, s1=poseidon_ptr[33].output.s0, s2=two - ); - assert poseidon_ptr[35].input = PoseidonBuiltinState( - s0=q_w.w8.high * r_w.w5.d2, s1=poseidon_ptr[34].output.s0, s2=two - ); - assert poseidon_ptr[36].input = PoseidonBuiltinState( - s0=q_w.w9.low * r_w.w6.d0, s1=poseidon_ptr[35].output.s0, s2=two - ); - assert poseidon_ptr[37].input = PoseidonBuiltinState( - s0=q_w.w9.high * r_w.w6.d1, s1=poseidon_ptr[36].output.s0, s2=two - ); - assert poseidon_ptr[38].input = PoseidonBuiltinState( - s0=q_w.w10.low * r_w.w6.d2, s1=poseidon_ptr[37].output.s0, s2=two - ); - assert poseidon_ptr[39].input = PoseidonBuiltinState( - s0=q_w.w10.high * r_w.w7.d0, s1=poseidon_ptr[38].output.s0, s2=two - ); - assert poseidon_ptr[40].input = PoseidonBuiltinState( - s0=r_w.w7.d1 * r_w.w7.d2, s1=poseidon_ptr[39].output.s0, s2=two - ); - assert poseidon_ptr[41].input = PoseidonBuiltinState( - s0=r_w.w8.d0 * r_w.w8.d1, s1=poseidon_ptr[40].output.s0, s2=two - ); - assert poseidon_ptr[42].input = PoseidonBuiltinState( - s0=r_w.w8.d2 * r_w.w9.d0, s1=poseidon_ptr[41].output.s0, s2=two - ); - assert poseidon_ptr[43].input = PoseidonBuiltinState( - s0=r_w.w9.d1 * r_w.w9.d2, s1=poseidon_ptr[42].output.s0, s2=two - ); - assert poseidon_ptr[44].input = PoseidonBuiltinState( - s0=r_w.w10.d0 * r_w.w10.d1, s1=poseidon_ptr[43].output.s0, s2=two - ); - assert poseidon_ptr[45].input = PoseidonBuiltinState( - s0=r_w.w10.d2 * r_w.w11.d0, s1=poseidon_ptr[44].output.s0, s2=two - ); - assert poseidon_ptr[46].input = PoseidonBuiltinState( - s0=r_w.w11.d1 * r_w.w11.d2, s1=poseidon_ptr[45].output.s0, s2=two - ); - - tempvar x_of_z_w1 = UnreducedBigInt5( - d0=x.w1.d0 * z_pow1_11.z_1.d0, - d1=x.w1.d0 * z_pow1_11.z_1.d1 + x.w1.d1 * z_pow1_11.z_1.d0, - d2=x.w1.d0 * z_pow1_11.z_1.d2 + x.w1.d1 * z_pow1_11.z_1.d1 + x.w1.d2 * z_pow1_11.z_1.d0, - d3=x.w1.d1 * z_pow1_11.z_1.d2 + x.w1.d2 * z_pow1_11.z_1.d1, - d4=x.w1.d2 * z_pow1_11.z_1.d2, - ); - tempvar x_of_z_w2 = UnreducedBigInt5( - d0=x.w2.d0 * z_pow1_11.z_2.d0, - d1=x.w2.d0 * z_pow1_11.z_2.d1 + x.w2.d1 * z_pow1_11.z_2.d0, - d2=x.w2.d0 * z_pow1_11.z_2.d2 + x.w2.d1 * z_pow1_11.z_2.d1 + x.w2.d2 * z_pow1_11.z_2.d0, - d3=x.w2.d1 * z_pow1_11.z_2.d2 + x.w2.d2 * z_pow1_11.z_2.d1, - d4=x.w2.d2 * z_pow1_11.z_2.d2, - ); - - tempvar x_of_z_w3 = UnreducedBigInt5( - d0=x.w3.d0 * z_pow1_11.z_3.d0, - d1=x.w3.d0 * z_pow1_11.z_3.d1 + x.w3.d1 * z_pow1_11.z_3.d0, - d2=x.w3.d0 * z_pow1_11.z_3.d2 + x.w3.d1 * z_pow1_11.z_3.d1 + x.w3.d2 * z_pow1_11.z_3.d0, - d3=x.w3.d1 * z_pow1_11.z_3.d2 + x.w3.d2 * z_pow1_11.z_3.d1, - d4=x.w3.d2 * z_pow1_11.z_3.d2, - ); - - tempvar x_of_z_w4 = UnreducedBigInt5( - d0=x.w4.d0 * z_pow1_11.z_4.d0, - d1=x.w4.d0 * z_pow1_11.z_4.d1 + x.w4.d1 * z_pow1_11.z_4.d0, - d2=x.w4.d0 * z_pow1_11.z_4.d2 + x.w4.d1 * z_pow1_11.z_4.d1 + x.w4.d2 * z_pow1_11.z_4.d0, - d3=x.w4.d1 * z_pow1_11.z_4.d2 + x.w4.d2 * z_pow1_11.z_4.d1, - d4=x.w4.d2 * z_pow1_11.z_4.d2, - ); - - tempvar x_of_z_w5 = UnreducedBigInt5( - d0=x.w5.d0 * z_pow1_11.z_5.d0, - d1=x.w5.d0 * z_pow1_11.z_5.d1 + x.w5.d1 * z_pow1_11.z_5.d0, - d2=x.w5.d0 * z_pow1_11.z_5.d2 + x.w5.d1 * z_pow1_11.z_5.d1 + x.w5.d2 * z_pow1_11.z_5.d0, - d3=x.w5.d1 * z_pow1_11.z_5.d2 + x.w5.d2 * z_pow1_11.z_5.d1, - d4=x.w5.d2 * z_pow1_11.z_5.d2, - ); - - tempvar x_of_z_w6 = UnreducedBigInt5( - d0=x.w6.d0 * z_pow1_11.z_6.d0, - d1=x.w6.d0 * z_pow1_11.z_6.d1 
+ x.w6.d1 * z_pow1_11.z_6.d0, - d2=x.w6.d0 * z_pow1_11.z_6.d2 + x.w6.d1 * z_pow1_11.z_6.d1 + x.w6.d2 * z_pow1_11.z_6.d0, - d3=x.w6.d1 * z_pow1_11.z_6.d2 + x.w6.d2 * z_pow1_11.z_6.d1, - d4=x.w6.d2 * z_pow1_11.z_6.d2, - ); - - tempvar x_of_z_w7 = UnreducedBigInt5( - d0=x.w7.d0 * z_pow1_11.z_7.d0, - d1=x.w7.d0 * z_pow1_11.z_7.d1 + x.w7.d1 * z_pow1_11.z_7.d0, - d2=x.w7.d0 * z_pow1_11.z_7.d2 + x.w7.d1 * z_pow1_11.z_7.d1 + x.w7.d2 * z_pow1_11.z_7.d0, - d3=x.w7.d1 * z_pow1_11.z_7.d2 + x.w7.d2 * z_pow1_11.z_7.d1, - d4=x.w7.d2 * z_pow1_11.z_7.d2, - ); - - tempvar x_of_z_w8 = UnreducedBigInt5( - d0=x.w8.d0 * z_pow1_11.z_8.d0, - d1=x.w8.d0 * z_pow1_11.z_8.d1 + x.w8.d1 * z_pow1_11.z_8.d0, - d2=x.w8.d0 * z_pow1_11.z_8.d2 + x.w8.d1 * z_pow1_11.z_8.d1 + x.w8.d2 * z_pow1_11.z_8.d0, - d3=x.w8.d1 * z_pow1_11.z_8.d2 + x.w8.d2 * z_pow1_11.z_8.d1, - d4=x.w8.d2 * z_pow1_11.z_8.d2, - ); - - tempvar x_of_z_w9 = UnreducedBigInt5( - d0=x.w9.d0 * z_pow1_11.z_9.d0, - d1=x.w9.d0 * z_pow1_11.z_9.d1 + x.w9.d1 * z_pow1_11.z_9.d0, - d2=x.w9.d0 * z_pow1_11.z_9.d2 + x.w9.d1 * z_pow1_11.z_9.d1 + x.w9.d2 * z_pow1_11.z_9.d0, - d3=x.w9.d1 * z_pow1_11.z_9.d2 + x.w9.d2 * z_pow1_11.z_9.d1, - d4=x.w9.d2 * z_pow1_11.z_9.d2, - ); - - tempvar x_of_z_w10 = UnreducedBigInt5( - d0=x.w10.d0 * z_pow1_11.z_10.d0, - d1=x.w10.d0 * z_pow1_11.z_10.d1 + x.w10.d1 * z_pow1_11.z_10.d0, - d2=x.w10.d0 * z_pow1_11.z_10.d2 + x.w10.d1 * z_pow1_11.z_10.d1 + x.w10.d2 * - z_pow1_11.z_10.d0, - d3=x.w10.d1 * z_pow1_11.z_10.d2 + x.w10.d2 * z_pow1_11.z_10.d1, - d4=x.w10.d2 * z_pow1_11.z_10.d2, - ); - - tempvar x_of_z_w11 = UnreducedBigInt5( - d0=x.w11.d0 * z_pow1_11.z_11.d0, - d1=x.w11.d0 * z_pow1_11.z_11.d1 + x.w11.d1 * z_pow1_11.z_11.d0, - d2=x.w11.d0 * z_pow1_11.z_11.d2 + x.w11.d1 * z_pow1_11.z_11.d1 + x.w11.d2 * - z_pow1_11.z_11.d0, - d3=x.w11.d1 * z_pow1_11.z_11.d2 + x.w11.d2 * z_pow1_11.z_11.d1, - d4=x.w11.d2 * z_pow1_11.z_11.d2, - ); - - let x_of_z = reduce_5( - UnreducedBigInt5( - d0=x.w0.d0 + x_of_z_w1.d0 + x_of_z_w2.d0 + x_of_z_w3.d0 + x_of_z_w4.d0 + - x_of_z_w5.d0 + x_of_z_w6.d0 + x_of_z_w7.d0 + x_of_z_w8.d0 + x_of_z_w9.d0 + - x_of_z_w10.d0 + x_of_z_w11.d0, - d1=x.w0.d1 + x_of_z_w1.d1 + x_of_z_w2.d1 + x_of_z_w3.d1 + x_of_z_w4.d1 + - x_of_z_w5.d1 + x_of_z_w6.d1 + x_of_z_w7.d1 + x_of_z_w8.d1 + x_of_z_w9.d1 + - x_of_z_w10.d1 + x_of_z_w11.d1, - d2=x.w0.d2 + x_of_z_w1.d2 + x_of_z_w2.d2 + x_of_z_w3.d2 + x_of_z_w4.d2 + - x_of_z_w5.d2 + x_of_z_w6.d2 + x_of_z_w7.d2 + x_of_z_w8.d2 + x_of_z_w9.d2 + - x_of_z_w10.d2 + x_of_z_w11.d2, - d3=x_of_z_w1.d3 + x_of_z_w2.d3 + x_of_z_w3.d3 + x_of_z_w4.d3 + x_of_z_w5.d3 + - x_of_z_w6.d3 + x_of_z_w7.d3 + x_of_z_w8.d3 + x_of_z_w9.d3 + x_of_z_w10.d3 + - x_of_z_w11.d3, - d4=x_of_z_w1.d4 + x_of_z_w2.d4 + x_of_z_w3.d4 + x_of_z_w4.d4 + x_of_z_w5.d4 + - x_of_z_w6.d4 + x_of_z_w7.d4 + x_of_z_w8.d4 + x_of_z_w9.d4 + x_of_z_w10.d4 + - x_of_z_w11.d4, - ), - ); - - let xy_acc = reduce_5( - UnreducedBigInt5( - d0=x_of_z.d0 * x_of_z.d0, - d1=two * x_of_z.d0 * x_of_z.d1, - d2=two * x_of_z.d0 * x_of_z.d2 + x_of_z.d1 * x_of_z.d1, - d3=two * x_of_z.d1 * x_of_z.d2, - d4=x_of_z.d2 * x_of_z.d2, - ), - ); - - let poseidon_ptr = poseidon_ptr + 47 * PoseidonBuiltin.SIZE; - let continuable_hash = [poseidon_ptr - PoseidonBuiltin.SIZE].output.s0; - let random_linear_combination_coeff = [poseidon_ptr - PoseidonBuiltin.SIZE].output.s1; - - assert bitwise_ptr.x = random_linear_combination_coeff; - assert bitwise_ptr.y = BASE_MIN_1; - tempvar c_i = bitwise_ptr.x_and_y; - let bitwise_ptr = bitwise_ptr + BitwiseBuiltin.SIZE; - - local poly_acc_12_f: 
PolyAcc12 = PolyAcc12( - xy=UnreducedBigInt3( - d0=poly_acc_12.xy.d0 + c_i * xy_acc.d0, - d1=poly_acc_12.xy.d1 + c_i * xy_acc.d1, - d2=poly_acc_12.xy.d2 + c_i * xy_acc.d2, - ), - q=E11DU( - Uint256( - c_i * q_w.w0.low + poly_acc_12.q.w0.low, - c_i * q_w.w0.high + poly_acc_12.q.w0.high, - ), - Uint256( - c_i * q_w.w1.low + poly_acc_12.q.w1.low, - c_i * q_w.w1.high + poly_acc_12.q.w1.high, - ), - Uint256( - c_i * q_w.w2.low + poly_acc_12.q.w2.low, - c_i * q_w.w2.high + poly_acc_12.q.w2.high, - ), - Uint256( - c_i * q_w.w3.low + poly_acc_12.q.w3.low, - c_i * q_w.w3.high + poly_acc_12.q.w3.high, - ), - Uint256( - c_i * q_w.w4.low + poly_acc_12.q.w4.low, - c_i * q_w.w4.high + poly_acc_12.q.w4.high, - ), - Uint256( - c_i * q_w.w5.low + poly_acc_12.q.w5.low, - c_i * q_w.w5.high + poly_acc_12.q.w5.high, - ), - Uint256( - c_i * q_w.w6.low + poly_acc_12.q.w6.low, - c_i * q_w.w6.high + poly_acc_12.q.w6.high, - ), - Uint256( - c_i * q_w.w7.low + poly_acc_12.q.w7.low, - c_i * q_w.w7.high + poly_acc_12.q.w7.high, - ), - Uint256( - c_i * q_w.w8.low + poly_acc_12.q.w8.low, - c_i * q_w.w8.high + poly_acc_12.q.w8.high, - ), - Uint256( - c_i * q_w.w9.low + poly_acc_12.q.w9.low, - c_i * q_w.w9.high + poly_acc_12.q.w9.high, - ), - Uint256( - c_i * q_w.w10.low + poly_acc_12.q.w10.low, - c_i * q_w.w10.high + poly_acc_12.q.w10.high, - ), - ), - r=E12DU( - UnreducedBigInt3( - d0=c_i * r_w.w0.d0 + poly_acc_12.r.w0.d0, - d1=c_i * r_w.w0.d1 + poly_acc_12.r.w0.d1, - d2=c_i * r_w.w0.d2 + poly_acc_12.r.w0.d2, - ), - UnreducedBigInt3( - d0=c_i * r_w.w1.d0 + poly_acc_12.r.w1.d0, - d1=c_i * r_w.w1.d1 + poly_acc_12.r.w1.d1, - d2=c_i * r_w.w1.d2 + poly_acc_12.r.w1.d2, - ), - UnreducedBigInt3( - d0=c_i * r_w.w2.d0 + poly_acc_12.r.w2.d0, - d1=c_i * r_w.w2.d1 + poly_acc_12.r.w2.d1, - d2=c_i * r_w.w2.d2 + poly_acc_12.r.w2.d2, - ), - UnreducedBigInt3( - d0=c_i * r_w.w3.d0 + poly_acc_12.r.w3.d0, - d1=c_i * r_w.w3.d1 + poly_acc_12.r.w3.d1, - d2=c_i * r_w.w3.d2 + poly_acc_12.r.w3.d2, - ), - UnreducedBigInt3( - d0=c_i * r_w.w4.d0 + poly_acc_12.r.w4.d0, - d1=c_i * r_w.w4.d1 + poly_acc_12.r.w4.d1, - d2=c_i * r_w.w4.d2 + poly_acc_12.r.w4.d2, - ), - UnreducedBigInt3( - d0=c_i * r_w.w5.d0 + poly_acc_12.r.w5.d0, - d1=c_i * r_w.w5.d1 + poly_acc_12.r.w5.d1, - d2=c_i * r_w.w5.d2 + poly_acc_12.r.w5.d2, - ), - UnreducedBigInt3( - d0=c_i * r_w.w6.d0 + poly_acc_12.r.w6.d0, - d1=c_i * r_w.w6.d1 + poly_acc_12.r.w6.d1, - d2=c_i * r_w.w6.d2 + poly_acc_12.r.w6.d2, - ), - UnreducedBigInt3( - d0=c_i * r_w.w7.d0 + poly_acc_12.r.w7.d0, - d1=c_i * r_w.w7.d1 + poly_acc_12.r.w7.d1, - d2=c_i * r_w.w7.d2 + poly_acc_12.r.w7.d2, - ), - UnreducedBigInt3( - d0=c_i * r_w.w8.d0 + poly_acc_12.r.w8.d0, - d1=c_i * r_w.w8.d1 + poly_acc_12.r.w8.d1, - d2=c_i * r_w.w8.d2 + poly_acc_12.r.w8.d2, - ), - UnreducedBigInt3( - d0=c_i * r_w.w9.d0 + poly_acc_12.r.w9.d0, - d1=c_i * r_w.w9.d1 + poly_acc_12.r.w9.d1, - d2=c_i * r_w.w9.d2 + poly_acc_12.r.w9.d2, - ), - UnreducedBigInt3( - d0=c_i * r_w.w10.d0 + poly_acc_12.r.w10.d0, - d1=c_i * r_w.w10.d1 + poly_acc_12.r.w10.d1, - d2=c_i * r_w.w10.d2 + poly_acc_12.r.w10.d2, - ), - UnreducedBigInt3( - d0=c_i * r_w.w11.d0 + poly_acc_12.r.w11.d0, - d1=c_i * r_w.w11.d1 + poly_acc_12.r.w11.d1, - d2=c_i * r_w.w11.d2 + poly_acc_12.r.w11.d2, - ), - ), - ); - let poly_acc_12 = &poly_acc_12_f; - return &r_w; - } - - func mul034{ - range_check_ptr, - bitwise_ptr: BitwiseBuiltin*, - poseidon_ptr: PoseidonBuiltin*, - z_pow1_11_ptr: ZPowers11*, - continuable_hash: felt, - poly_acc_034: PolyAcc034*, - }(x_ptr: E12D*, y_ptr: E12full034*) -> E12D* { - 
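The e12_tricks functions (square above, mul034, mul034_034 and mul01234 below) all share one pattern: the hint multiplies the operands as degree-11 polynomials in w over Fp, divides the product by the irreducible polynomial of the degree-12 extension to get a quotient q_w and remainder r_w, and the Cairo code only checks the identity x(z)*y(z) = q(z)*IRR(z) + r(z) mod p at a random evaluation point z. What follows is a minimal Python sketch of that hint-side computation; it assumes IRREDUCIBLE_POLY_12 is w^12 - 18*w^6 + 82 (what the usual BN254 tower u^2 = -1, v^3 = 9 + u, w^2 = v flattens to) and uses the prime p quoted in the hints.

# Minimal sketch of the quotient/remainder trick the e12_tricks hints rely on.
# Assumption: IRREDUCIBLE_POLY_12 = w^12 - 18*w^6 + 82 (standard BN254 direct extension).
p = 0x30644E72E131A029B85045B68181585D97816A916871CA8D3C208C16D87CFD47
IRR = [82] + [0] * 5 + [-18] + [0] * 5 + [1]  # coefficients, low degree first

def poly_mul(a, b):
    # Schoolbook product of coefficient lists over Fp.
    out = [0] * (len(a) + len(b) - 1)
    for i, ai in enumerate(a):
        for j, bj in enumerate(b):
            out[i + j] = (out[i + j] + ai * bj) % p
    return out

def poly_divmod(num, den):
    # Euclidean division by a monic polynomial (den) over Fp.
    num = num[:]
    q = [0] * (len(num) - len(den) + 1)
    for i in range(len(q) - 1, -1, -1):
        q[i] = num[i + len(den) - 1] % p
        for j, dj in enumerate(den):
            num[i + j] = (num[i + j] - q[i] * dj) % p
    return q, [c % p for c in num[: len(den) - 1]]

def eval_at(coeffs, z):
    return sum(c * pow(z, k, p) for k, c in enumerate(coeffs)) % p

def mul_e12(x, y, z):
    # x, y: 12 coefficients each. Returns the (q, r) a hint would fill into q_w / r_w
    # and checks the evaluation identity the Cairo code enforces at the random point z.
    q, r = poly_divmod(poly_mul(x, y), IRR)
    assert (eval_at(x, z) * eval_at(y, z)) % p == (eval_at(q, z) * eval_at(IRR, z) + eval_at(r, z)) % p
    return q, r

With x == y this matches what the square hint produces (11 quotient and 12 remainder coefficients); the sparse variants only shrink the quotient, which is why mul034 stores it in an E9full and mul034_034 in an E7full, exactly as the length asserts in their hints require.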
alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - local x: E12D = [x_ptr]; - local y: E12full034 = [y_ptr]; - local z_pow1_11: ZPowers11 = [z_pow1_11_ptr]; - local r_w: E12D; - local q_w: E9full; - - %{ - from tools.py.polynomial import Polynomial - from tools.py.field import BaseFieldElement, BaseField - from starkware.cairo.common.cairo_secp.secp_utils import split - from tools.make.utils import split_128 - from src.hints.fq import pack_e12d, fill_e12d - from src.bn254.curve import IRREDUCIBLE_POLY_12 - - - p=0x30644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd47 - field = BaseField(p) - x=12*[0] - y=[1]+11*[0] - x_refs=[ids.x.w0, ids.x.w1, ids.x.w2, ids.x.w3, ids.x.w4, ids.x.w5, ids.x.w6, ids.x.w7, ids.x.w8, ids.x.w9, ids.x.w10, ids.x.w11] - y_refs=[(1,ids.y.w1), (3,ids.y.w3), (7,ids.y.w7), (9,ids.y.w9)] - for i in range(ids.N_LIMBS): - for index, ref in y_refs: - y[index]+=as_int(getattr(ref, 'd'+str(i)), PRIME) * ids.BASE**i - for k in range(12): - x[k]+=as_int(getattr(x_refs[k], 'd'+str(i)), PRIME) * ids.BASE**i - x_poly=Polynomial([BaseFieldElement(x[i], field) for i in range(12)]) - y_poly=Polynomial([BaseFieldElement(y[i], field) for i in range(12)]) - z_poly=x_poly*y_poly - z_polyr=z_poly % IRREDUCIBLE_POLY_12 - z_polyq=z_poly // IRREDUCIBLE_POLY_12 - z_polyr_coeffs = z_polyr.get_coeffs() - z_polyq_coeffs = z_polyq.get_coeffs() - assert len(z_polyq_coeffs)<=9, f"len z_polyq_coeffs: {len(z_polyq_coeffs)}, degree: {z_polyq.degree()}" - assert len(z_polyr_coeffs)<=12, f"len z_polyr_coeffs: {z_polyr_coeffs}, degree: {z_polyr.degree()}" - # extend z_polyq with 0 to make it len 9: - z_polyq_coeffs = z_polyq_coeffs + (9-len(z_polyq_coeffs))*[0] - # extend z_polyr with 0 to make it len 12: - z_polyr_coeffs = z_polyr_coeffs + (12-len(z_polyr_coeffs))*[0] - for i in range(12): - val = split(z_polyr_coeffs[i]%p) - for k in range(ids.N_LIMBS): - rsetattr(ids.r_w, f'w{i}.d{k}', val[k]) - for i in range(9): - val = split_128(z_polyq_coeffs[i]%p) - rsetattr(ids.q_w, f'w{i}.low', val[0]) - rsetattr(ids.q_w, f'w{i}.high', val[1]) - %} - assert [range_check_ptr + 0] = r_w.w0.d0; - assert [range_check_ptr + 1] = r_w.w0.d1; - assert [range_check_ptr + 2] = r_w.w0.d2; - assert [range_check_ptr + 3] = r_w.w1.d0; - assert [range_check_ptr + 4] = r_w.w1.d1; - assert [range_check_ptr + 5] = r_w.w1.d2; - assert [range_check_ptr + 6] = r_w.w2.d0; - assert [range_check_ptr + 7] = r_w.w2.d1; - assert [range_check_ptr + 8] = r_w.w2.d2; - assert [range_check_ptr + 9] = r_w.w3.d0; - assert [range_check_ptr + 10] = r_w.w3.d1; - assert [range_check_ptr + 11] = r_w.w3.d2; - assert [range_check_ptr + 12] = r_w.w4.d0; - assert [range_check_ptr + 13] = r_w.w4.d1; - assert [range_check_ptr + 14] = r_w.w4.d2; - assert [range_check_ptr + 15] = r_w.w5.d0; - assert [range_check_ptr + 16] = r_w.w5.d1; - assert [range_check_ptr + 17] = r_w.w5.d2; - assert [range_check_ptr + 18] = r_w.w6.d0; - assert [range_check_ptr + 19] = r_w.w6.d1; - assert [range_check_ptr + 20] = r_w.w6.d2; - assert [range_check_ptr + 21] = r_w.w7.d0; - assert [range_check_ptr + 22] = r_w.w7.d1; - assert [range_check_ptr + 23] = r_w.w7.d2; - assert [range_check_ptr + 24] = r_w.w8.d0; - assert [range_check_ptr + 25] = r_w.w8.d1; - assert [range_check_ptr + 26] = r_w.w8.d2; - assert [range_check_ptr + 27] = r_w.w9.d0; - assert [range_check_ptr + 28] = r_w.w9.d1; - assert [range_check_ptr + 29] = r_w.w9.d2; - assert [range_check_ptr + 30] = r_w.w10.d0; - assert [range_check_ptr + 31] = r_w.w10.d1; - assert [range_check_ptr + 32] = 
r_w.w10.d2; - assert [range_check_ptr + 33] = r_w.w11.d0; - assert [range_check_ptr + 34] = r_w.w11.d1; - assert [range_check_ptr + 35] = r_w.w11.d2; - assert [range_check_ptr + 36] = q_w.w0.low; - assert [range_check_ptr + 37] = q_w.w0.high; - assert [range_check_ptr + 38] = q_w.w1.low; - assert [range_check_ptr + 39] = q_w.w1.high; - assert [range_check_ptr + 40] = q_w.w2.low; - assert [range_check_ptr + 41] = q_w.w2.high; - assert [range_check_ptr + 42] = q_w.w3.low; - assert [range_check_ptr + 43] = q_w.w3.high; - assert [range_check_ptr + 44] = q_w.w4.low; - assert [range_check_ptr + 45] = q_w.w4.high; - assert [range_check_ptr + 46] = q_w.w5.low; - assert [range_check_ptr + 47] = q_w.w5.high; - assert [range_check_ptr + 48] = q_w.w6.low; - assert [range_check_ptr + 49] = q_w.w6.high; - assert [range_check_ptr + 50] = q_w.w7.low; - assert [range_check_ptr + 51] = q_w.w7.high; - assert [range_check_ptr + 52] = q_w.w8.low; - assert [range_check_ptr + 53] = q_w.w8.high; - assert [range_check_ptr + 54] = 3 * 12 * BASE_MIN_1 - ( - r_w.w0.d0 + - r_w.w0.d1 + - r_w.w0.d2 + - r_w.w1.d0 + - r_w.w1.d1 + - r_w.w1.d2 + - r_w.w2.d0 + - r_w.w2.d1 + - r_w.w2.d2 + - r_w.w3.d0 + - r_w.w3.d1 + - r_w.w3.d2 + - r_w.w4.d0 + - r_w.w4.d1 + - r_w.w4.d2 + - r_w.w5.d0 + - r_w.w5.d1 + - r_w.w5.d2 + - r_w.w6.d0 + - r_w.w6.d1 + - r_w.w6.d2 + - r_w.w7.d0 + - r_w.w7.d1 + - r_w.w7.d2 + - r_w.w8.d0 + - r_w.w8.d1 + - r_w.w8.d2 + - r_w.w9.d0 + - r_w.w9.d1 + - r_w.w9.d2 + - r_w.w10.d0 + - r_w.w10.d1 + - r_w.w10.d2 + - r_w.w11.d0 + - r_w.w11.d1 + - r_w.w11.d2 - ); - - tempvar range_check_ptr = range_check_ptr + 55; - - tempvar two = 2; - assert poseidon_ptr.input = PoseidonBuiltinState( - s0=x.w0.d0 * x.w0.d1, s1=continuable_hash, s2=two - ); - assert poseidon_ptr[1].input = PoseidonBuiltinState( - s0=x.w0.d2 * x.w1.d0, s1=poseidon_ptr[0].output.s0, s2=two - ); - assert poseidon_ptr[2].input = PoseidonBuiltinState( - s0=x.w1.d1 * x.w1.d2, s1=poseidon_ptr[1].output.s0, s2=two - ); - assert poseidon_ptr[3].input = PoseidonBuiltinState( - s0=x.w2.d0 * x.w2.d1, s1=poseidon_ptr[2].output.s0, s2=two - ); - assert poseidon_ptr[4].input = PoseidonBuiltinState( - s0=x.w2.d2 * x.w3.d0, s1=poseidon_ptr[3].output.s0, s2=two - ); - assert poseidon_ptr[5].input = PoseidonBuiltinState( - s0=x.w3.d1 * x.w3.d2, s1=poseidon_ptr[4].output.s0, s2=two - ); - assert poseidon_ptr[6].input = PoseidonBuiltinState( - s0=x.w4.d0 * x.w4.d1, s1=poseidon_ptr[5].output.s0, s2=two - ); - assert poseidon_ptr[7].input = PoseidonBuiltinState( - s0=x.w4.d2 * x.w5.d0, s1=poseidon_ptr[6].output.s0, s2=two - ); - assert poseidon_ptr[8].input = PoseidonBuiltinState( - s0=x.w5.d1 * x.w5.d2, s1=poseidon_ptr[7].output.s0, s2=two - ); - assert poseidon_ptr[9].input = PoseidonBuiltinState( - s0=x.w6.d0 * x.w6.d1, s1=poseidon_ptr[8].output.s0, s2=two - ); - assert poseidon_ptr[10].input = PoseidonBuiltinState( - s0=x.w6.d2 * x.w7.d0, s1=poseidon_ptr[9].output.s0, s2=two - ); - assert poseidon_ptr[11].input = PoseidonBuiltinState( - s0=x.w7.d1 * x.w7.d2, s1=poseidon_ptr[10].output.s0, s2=two - ); - assert poseidon_ptr[12].input = PoseidonBuiltinState( - s0=x.w8.d0 * x.w8.d1, s1=poseidon_ptr[11].output.s0, s2=two - ); - assert poseidon_ptr[13].input = PoseidonBuiltinState( - s0=x.w8.d2 * x.w9.d0, s1=poseidon_ptr[12].output.s0, s2=two - ); - assert poseidon_ptr[14].input = PoseidonBuiltinState( - s0=x.w9.d1 * x.w9.d2, s1=poseidon_ptr[13].output.s0, s2=two - ); - assert poseidon_ptr[15].input = PoseidonBuiltinState( - s0=x.w10.d0 * x.w10.d1, 
s1=poseidon_ptr[14].output.s0, s2=two - ); - assert poseidon_ptr[16].input = PoseidonBuiltinState( - s0=x.w10.d2 * x.w11.d0, s1=poseidon_ptr[15].output.s0, s2=two - ); - assert poseidon_ptr[17].input = PoseidonBuiltinState( - s0=x.w11.d1 * x.w11.d2, s1=poseidon_ptr[16].output.s0, s2=two - ); - assert poseidon_ptr[18].input = PoseidonBuiltinState( - s0=y.w1.d0 * y.w1.d1, s1=poseidon_ptr[17].output.s0, s2=two - ); - assert poseidon_ptr[19].input = PoseidonBuiltinState( - s0=y.w1.d2 * y.w3.d0, s1=poseidon_ptr[18].output.s0, s2=two - ); - assert poseidon_ptr[20].input = PoseidonBuiltinState( - s0=y.w3.d1 * y.w3.d2, s1=poseidon_ptr[19].output.s0, s2=two - ); - assert poseidon_ptr[21].input = PoseidonBuiltinState( - s0=y.w7.d0 * y.w7.d1, s1=poseidon_ptr[20].output.s0, s2=two - ); - assert poseidon_ptr[22].input = PoseidonBuiltinState( - s0=y.w7.d2 * y.w9.d0, s1=poseidon_ptr[21].output.s0, s2=two - ); - assert poseidon_ptr[23].input = PoseidonBuiltinState( - s0=y.w9.d1 * y.w9.d2, s1=poseidon_ptr[22].output.s0, s2=two - ); - assert poseidon_ptr[24].input = PoseidonBuiltinState( - s0=q_w.w0.low * r_w.w0.d0, s1=poseidon_ptr[23].output.s0, s2=two - ); - assert poseidon_ptr[25].input = PoseidonBuiltinState( - s0=q_w.w0.high * r_w.w0.d1, s1=poseidon_ptr[24].output.s0, s2=two - ); - assert poseidon_ptr[26].input = PoseidonBuiltinState( - s0=q_w.w1.low * r_w.w0.d2, s1=poseidon_ptr[25].output.s0, s2=two - ); - assert poseidon_ptr[27].input = PoseidonBuiltinState( - s0=q_w.w1.high * r_w.w1.d0, s1=poseidon_ptr[26].output.s0, s2=two - ); - assert poseidon_ptr[28].input = PoseidonBuiltinState( - s0=q_w.w2.low * r_w.w1.d1, s1=poseidon_ptr[27].output.s0, s2=two - ); - assert poseidon_ptr[29].input = PoseidonBuiltinState( - s0=q_w.w2.high * r_w.w1.d2, s1=poseidon_ptr[28].output.s0, s2=two - ); - assert poseidon_ptr[30].input = PoseidonBuiltinState( - s0=q_w.w3.low * r_w.w2.d0, s1=poseidon_ptr[29].output.s0, s2=two - ); - assert poseidon_ptr[31].input = PoseidonBuiltinState( - s0=q_w.w3.high * r_w.w2.d1, s1=poseidon_ptr[30].output.s0, s2=two - ); - assert poseidon_ptr[32].input = PoseidonBuiltinState( - s0=q_w.w4.low * r_w.w2.d2, s1=poseidon_ptr[31].output.s0, s2=two - ); - assert poseidon_ptr[33].input = PoseidonBuiltinState( - s0=q_w.w4.high * r_w.w3.d0, s1=poseidon_ptr[32].output.s0, s2=two - ); - assert poseidon_ptr[34].input = PoseidonBuiltinState( - s0=q_w.w5.low * r_w.w3.d1, s1=poseidon_ptr[33].output.s0, s2=two - ); - assert poseidon_ptr[35].input = PoseidonBuiltinState( - s0=q_w.w5.high * r_w.w3.d2, s1=poseidon_ptr[34].output.s0, s2=two - ); - assert poseidon_ptr[36].input = PoseidonBuiltinState( - s0=q_w.w6.low * r_w.w4.d0, s1=poseidon_ptr[35].output.s0, s2=two - ); - assert poseidon_ptr[37].input = PoseidonBuiltinState( - s0=q_w.w6.high * r_w.w4.d1, s1=poseidon_ptr[36].output.s0, s2=two - ); - assert poseidon_ptr[38].input = PoseidonBuiltinState( - s0=q_w.w7.low * r_w.w4.d2, s1=poseidon_ptr[37].output.s0, s2=two - ); - assert poseidon_ptr[39].input = PoseidonBuiltinState( - s0=q_w.w7.high * r_w.w5.d0, s1=poseidon_ptr[38].output.s0, s2=two - ); - assert poseidon_ptr[40].input = PoseidonBuiltinState( - s0=q_w.w8.low * r_w.w5.d1, s1=poseidon_ptr[39].output.s0, s2=two - ); - assert poseidon_ptr[41].input = PoseidonBuiltinState( - s0=q_w.w8.high * r_w.w5.d2, s1=poseidon_ptr[40].output.s0, s2=two - ); - assert poseidon_ptr[42].input = PoseidonBuiltinState( - s0=r_w.w6.d0 * r_w.w6.d1, s1=poseidon_ptr[41].output.s0, s2=two - ); - assert poseidon_ptr[43].input = PoseidonBuiltinState( - s0=r_w.w6.d2 * r_w.w7.d0, 
s1=poseidon_ptr[42].output.s0, s2=two - ); - assert poseidon_ptr[44].input = PoseidonBuiltinState( - s0=r_w.w7.d1 * r_w.w7.d2, s1=poseidon_ptr[43].output.s0, s2=two - ); - assert poseidon_ptr[45].input = PoseidonBuiltinState( - s0=r_w.w8.d0 * r_w.w8.d1, s1=poseidon_ptr[44].output.s0, s2=two - ); - assert poseidon_ptr[46].input = PoseidonBuiltinState( - s0=r_w.w8.d2 * r_w.w9.d0, s1=poseidon_ptr[45].output.s0, s2=two - ); - assert poseidon_ptr[47].input = PoseidonBuiltinState( - s0=r_w.w9.d1 * r_w.w9.d2, s1=poseidon_ptr[46].output.s0, s2=two - ); - assert poseidon_ptr[48].input = PoseidonBuiltinState( - s0=r_w.w10.d0 * r_w.w10.d1, s1=poseidon_ptr[47].output.s0, s2=two - ); - assert poseidon_ptr[49].input = PoseidonBuiltinState( - s0=r_w.w10.d2 * r_w.w11.d0, s1=poseidon_ptr[48].output.s0, s2=two - ); - assert poseidon_ptr[50].input = PoseidonBuiltinState( - s0=r_w.w11.d1 * r_w.w11.d2, s1=poseidon_ptr[49].output.s0, s2=two - ); - - tempvar x_of_z_w1 = UnreducedBigInt5( - d0=x.w1.d0 * z_pow1_11.z_1.d0, - d1=x.w1.d0 * z_pow1_11.z_1.d1 + x.w1.d1 * z_pow1_11.z_1.d0, - d2=x.w1.d0 * z_pow1_11.z_1.d2 + x.w1.d1 * z_pow1_11.z_1.d1 + x.w1.d2 * z_pow1_11.z_1.d0, - d3=x.w1.d1 * z_pow1_11.z_1.d2 + x.w1.d2 * z_pow1_11.z_1.d1, - d4=x.w1.d2 * z_pow1_11.z_1.d2, - ); - tempvar x_of_z_w2 = UnreducedBigInt5( - d0=x.w2.d0 * z_pow1_11.z_2.d0, - d1=x.w2.d0 * z_pow1_11.z_2.d1 + x.w2.d1 * z_pow1_11.z_2.d0, - d2=x.w2.d0 * z_pow1_11.z_2.d2 + x.w2.d1 * z_pow1_11.z_2.d1 + x.w2.d2 * z_pow1_11.z_2.d0, - d3=x.w2.d1 * z_pow1_11.z_2.d2 + x.w2.d2 * z_pow1_11.z_2.d1, - d4=x.w2.d2 * z_pow1_11.z_2.d2, - ); - - tempvar x_of_z_w3 = UnreducedBigInt5( - d0=x.w3.d0 * z_pow1_11.z_3.d0, - d1=x.w3.d0 * z_pow1_11.z_3.d1 + x.w3.d1 * z_pow1_11.z_3.d0, - d2=x.w3.d0 * z_pow1_11.z_3.d2 + x.w3.d1 * z_pow1_11.z_3.d1 + x.w3.d2 * z_pow1_11.z_3.d0, - d3=x.w3.d1 * z_pow1_11.z_3.d2 + x.w3.d2 * z_pow1_11.z_3.d1, - d4=x.w3.d2 * z_pow1_11.z_3.d2, - ); - - tempvar x_of_z_w4 = UnreducedBigInt5( - d0=x.w4.d0 * z_pow1_11.z_4.d0, - d1=x.w4.d0 * z_pow1_11.z_4.d1 + x.w4.d1 * z_pow1_11.z_4.d0, - d2=x.w4.d0 * z_pow1_11.z_4.d2 + x.w4.d1 * z_pow1_11.z_4.d1 + x.w4.d2 * z_pow1_11.z_4.d0, - d3=x.w4.d1 * z_pow1_11.z_4.d2 + x.w4.d2 * z_pow1_11.z_4.d1, - d4=x.w4.d2 * z_pow1_11.z_4.d2, - ); - - tempvar x_of_z_w5 = UnreducedBigInt5( - d0=x.w5.d0 * z_pow1_11.z_5.d0, - d1=x.w5.d0 * z_pow1_11.z_5.d1 + x.w5.d1 * z_pow1_11.z_5.d0, - d2=x.w5.d0 * z_pow1_11.z_5.d2 + x.w5.d1 * z_pow1_11.z_5.d1 + x.w5.d2 * z_pow1_11.z_5.d0, - d3=x.w5.d1 * z_pow1_11.z_5.d2 + x.w5.d2 * z_pow1_11.z_5.d1, - d4=x.w5.d2 * z_pow1_11.z_5.d2, - ); - - tempvar x_of_z_w6 = UnreducedBigInt5( - d0=x.w6.d0 * z_pow1_11.z_6.d0, - d1=x.w6.d0 * z_pow1_11.z_6.d1 + x.w6.d1 * z_pow1_11.z_6.d0, - d2=x.w6.d0 * z_pow1_11.z_6.d2 + x.w6.d1 * z_pow1_11.z_6.d1 + x.w6.d2 * z_pow1_11.z_6.d0, - d3=x.w6.d1 * z_pow1_11.z_6.d2 + x.w6.d2 * z_pow1_11.z_6.d1, - d4=x.w6.d2 * z_pow1_11.z_6.d2, - ); - - tempvar x_of_z_w7 = UnreducedBigInt5( - d0=x.w7.d0 * z_pow1_11.z_7.d0, - d1=x.w7.d0 * z_pow1_11.z_7.d1 + x.w7.d1 * z_pow1_11.z_7.d0, - d2=x.w7.d0 * z_pow1_11.z_7.d2 + x.w7.d1 * z_pow1_11.z_7.d1 + x.w7.d2 * z_pow1_11.z_7.d0, - d3=x.w7.d1 * z_pow1_11.z_7.d2 + x.w7.d2 * z_pow1_11.z_7.d1, - d4=x.w7.d2 * z_pow1_11.z_7.d2, - ); - - tempvar x_of_z_w8 = UnreducedBigInt5( - d0=x.w8.d0 * z_pow1_11.z_8.d0, - d1=x.w8.d0 * z_pow1_11.z_8.d1 + x.w8.d1 * z_pow1_11.z_8.d0, - d2=x.w8.d0 * z_pow1_11.z_8.d2 + x.w8.d1 * z_pow1_11.z_8.d1 + x.w8.d2 * z_pow1_11.z_8.d0, - d3=x.w8.d1 * z_pow1_11.z_8.d2 + x.w8.d2 * z_pow1_11.z_8.d1, - d4=x.w8.d2 * z_pow1_11.z_8.d2, 
- ); - - tempvar x_of_z_w9 = UnreducedBigInt5( - d0=x.w9.d0 * z_pow1_11.z_9.d0, - d1=x.w9.d0 * z_pow1_11.z_9.d1 + x.w9.d1 * z_pow1_11.z_9.d0, - d2=x.w9.d0 * z_pow1_11.z_9.d2 + x.w9.d1 * z_pow1_11.z_9.d1 + x.w9.d2 * z_pow1_11.z_9.d0, - d3=x.w9.d1 * z_pow1_11.z_9.d2 + x.w9.d2 * z_pow1_11.z_9.d1, - d4=x.w9.d2 * z_pow1_11.z_9.d2, - ); - - tempvar x_of_z_w10 = UnreducedBigInt5( - d0=x.w10.d0 * z_pow1_11.z_10.d0, - d1=x.w10.d0 * z_pow1_11.z_10.d1 + x.w10.d1 * z_pow1_11.z_10.d0, - d2=x.w10.d0 * z_pow1_11.z_10.d2 + x.w10.d1 * z_pow1_11.z_10.d1 + x.w10.d2 * - z_pow1_11.z_10.d0, - d3=x.w10.d1 * z_pow1_11.z_10.d2 + x.w10.d2 * z_pow1_11.z_10.d1, - d4=x.w10.d2 * z_pow1_11.z_10.d2, - ); - - tempvar x_of_z_w11 = UnreducedBigInt5( - d0=x.w11.d0 * z_pow1_11.z_11.d0, - d1=x.w11.d0 * z_pow1_11.z_11.d1 + x.w11.d1 * z_pow1_11.z_11.d0, - d2=x.w11.d0 * z_pow1_11.z_11.d2 + x.w11.d1 * z_pow1_11.z_11.d1 + x.w11.d2 * - z_pow1_11.z_11.d0, - d3=x.w11.d1 * z_pow1_11.z_11.d2 + x.w11.d2 * z_pow1_11.z_11.d1, - d4=x.w11.d2 * z_pow1_11.z_11.d2, - ); - - let x_of_z = reduce_5( - UnreducedBigInt5( - d0=x.w0.d0 + x_of_z_w1.d0 + x_of_z_w2.d0 + x_of_z_w3.d0 + x_of_z_w4.d0 + - x_of_z_w5.d0 + x_of_z_w6.d0 + x_of_z_w7.d0 + x_of_z_w8.d0 + x_of_z_w9.d0 + - x_of_z_w10.d0 + x_of_z_w11.d0, - d1=x.w0.d1 + x_of_z_w1.d1 + x_of_z_w2.d1 + x_of_z_w3.d1 + x_of_z_w4.d1 + - x_of_z_w5.d1 + x_of_z_w6.d1 + x_of_z_w7.d1 + x_of_z_w8.d1 + x_of_z_w9.d1 + - x_of_z_w10.d1 + x_of_z_w11.d1, - d2=x.w0.d2 + x_of_z_w1.d2 + x_of_z_w2.d2 + x_of_z_w3.d2 + x_of_z_w4.d2 + - x_of_z_w5.d2 + x_of_z_w6.d2 + x_of_z_w7.d2 + x_of_z_w8.d2 + x_of_z_w9.d2 + - x_of_z_w10.d2 + x_of_z_w11.d2, - d3=x_of_z_w1.d3 + x_of_z_w2.d3 + x_of_z_w3.d3 + x_of_z_w4.d3 + x_of_z_w5.d3 + - x_of_z_w6.d3 + x_of_z_w7.d3 + x_of_z_w8.d3 + x_of_z_w9.d3 + x_of_z_w10.d3 + - x_of_z_w11.d3, - d4=x_of_z_w1.d4 + x_of_z_w2.d4 + x_of_z_w3.d4 + x_of_z_w4.d4 + x_of_z_w5.d4 + - x_of_z_w6.d4 + x_of_z_w7.d4 + x_of_z_w8.d4 + x_of_z_w9.d4 + x_of_z_w10.d4 + - x_of_z_w11.d4, - ), - ); - - tempvar y_of_z_w1 = UnreducedBigInt5( - d0=y.w1.d0 * z_pow1_11.z_1.d0, - d1=y.w1.d0 * z_pow1_11.z_1.d1 + y.w1.d1 * z_pow1_11.z_1.d0, - d2=y.w1.d0 * z_pow1_11.z_1.d2 + y.w1.d1 * z_pow1_11.z_1.d1 + y.w1.d2 * z_pow1_11.z_1.d0, - d3=y.w1.d1 * z_pow1_11.z_1.d2 + y.w1.d2 * z_pow1_11.z_1.d1, - d4=y.w1.d2 * z_pow1_11.z_1.d2, - ); - - tempvar y_of_z_w3 = UnreducedBigInt5( - d0=y.w3.d0 * z_pow1_11.z_3.d0, - d1=y.w3.d0 * z_pow1_11.z_3.d1 + y.w3.d1 * z_pow1_11.z_3.d0, - d2=y.w3.d0 * z_pow1_11.z_3.d2 + y.w3.d1 * z_pow1_11.z_3.d1 + y.w3.d2 * z_pow1_11.z_3.d0, - d3=y.w3.d1 * z_pow1_11.z_3.d2 + y.w3.d2 * z_pow1_11.z_3.d1, - d4=y.w3.d2 * z_pow1_11.z_3.d2, - ); - - tempvar y_of_z_w7 = UnreducedBigInt5( - d0=y.w7.d0 * z_pow1_11.z_7.d0, - d1=y.w7.d0 * z_pow1_11.z_7.d1 + y.w7.d1 * z_pow1_11.z_7.d0, - d2=y.w7.d0 * z_pow1_11.z_7.d2 + y.w7.d1 * z_pow1_11.z_7.d1 + y.w7.d2 * z_pow1_11.z_7.d0, - d3=y.w7.d1 * z_pow1_11.z_7.d2 + y.w7.d2 * z_pow1_11.z_7.d1, - d4=y.w7.d2 * z_pow1_11.z_7.d2, - ); - - tempvar y_of_z_w9 = UnreducedBigInt5( - d0=y.w9.d0 * z_pow1_11.z_9.d0, - d1=y.w9.d0 * z_pow1_11.z_9.d1 + y.w9.d1 * z_pow1_11.z_9.d0, - d2=y.w9.d0 * z_pow1_11.z_9.d2 + y.w9.d1 * z_pow1_11.z_9.d1 + y.w9.d2 * z_pow1_11.z_9.d0, - d3=y.w9.d1 * z_pow1_11.z_9.d2 + y.w9.d2 * z_pow1_11.z_9.d1, - d4=y.w9.d2 * z_pow1_11.z_9.d2, - ); - - let y_of_z = reduce_5( - UnreducedBigInt5( - d0=1 + y_of_z_w1.d0 + y_of_z_w3.d0 + y_of_z_w7.d0 + y_of_z_w9.d0, - d1=y_of_z_w1.d1 + y_of_z_w3.d1 + y_of_z_w7.d1 + y_of_z_w9.d1, - d2=y_of_z_w1.d2 + y_of_z_w3.d2 + y_of_z_w7.d2 + y_of_z_w9.d2, - 
d3=y_of_z_w1.d3 + y_of_z_w3.d3 + y_of_z_w7.d3 + y_of_z_w9.d3, - d4=y_of_z_w1.d4 + y_of_z_w3.d4 + y_of_z_w7.d4 + y_of_z_w9.d4, - ), - ); - - let xy_acc = reduce_5( - UnreducedBigInt5( - d0=x_of_z.d0 * y_of_z.d0, - d1=x_of_z.d0 * y_of_z.d1 + x_of_z.d1 * y_of_z.d0, - d2=x_of_z.d0 * y_of_z.d2 + x_of_z.d1 * y_of_z.d1 + x_of_z.d2 * y_of_z.d0, - d3=x_of_z.d1 * y_of_z.d2 + x_of_z.d2 * y_of_z.d1, - d4=x_of_z.d2 * y_of_z.d2, - ), - ); - - let poseidon_ptr = poseidon_ptr + 51 * PoseidonBuiltin.SIZE; - let continuable_hash = [poseidon_ptr - PoseidonBuiltin.SIZE].output.s0; - let random_linear_combination_coeff = [poseidon_ptr - PoseidonBuiltin.SIZE].output.s1; - assert bitwise_ptr.x = random_linear_combination_coeff; - assert bitwise_ptr.y = BASE_MIN_1; - tempvar c_i = bitwise_ptr.x_and_y; - let bitwise_ptr = bitwise_ptr + BitwiseBuiltin.SIZE; - - local poly_acc_034_f: PolyAcc034 = PolyAcc034( - xy=UnreducedBigInt3( - d0=poly_acc_034.xy.d0 + c_i * xy_acc.d0, - d1=poly_acc_034.xy.d1 + c_i * xy_acc.d1, - d2=poly_acc_034.xy.d2 + c_i * xy_acc.d2, - ), - q=E9full( - Uint256( - c_i * q_w.w0.low + poly_acc_034.q.w0.low, - c_i * q_w.w0.high + poly_acc_034.q.w0.high, - ), - Uint256( - c_i * q_w.w1.low + poly_acc_034.q.w1.low, - c_i * q_w.w1.high + poly_acc_034.q.w1.high, - ), - Uint256( - c_i * q_w.w2.low + poly_acc_034.q.w2.low, - c_i * q_w.w2.high + poly_acc_034.q.w2.high, - ), - Uint256( - c_i * q_w.w3.low + poly_acc_034.q.w3.low, - c_i * q_w.w3.high + poly_acc_034.q.w3.high, - ), - Uint256( - c_i * q_w.w4.low + poly_acc_034.q.w4.low, - c_i * q_w.w4.high + poly_acc_034.q.w4.high, - ), - Uint256( - c_i * q_w.w5.low + poly_acc_034.q.w5.low, - c_i * q_w.w5.high + poly_acc_034.q.w5.high, - ), - Uint256( - c_i * q_w.w6.low + poly_acc_034.q.w6.low, - c_i * q_w.w6.high + poly_acc_034.q.w6.high, - ), - Uint256( - c_i * q_w.w7.low + poly_acc_034.q.w7.low, - c_i * q_w.w7.high + poly_acc_034.q.w7.high, - ), - Uint256( - c_i * q_w.w8.low + poly_acc_034.q.w8.low, - c_i * q_w.w8.high + poly_acc_034.q.w8.high, - ), - ), - r=E12DU( - UnreducedBigInt3( - d0=c_i * r_w.w0.d0 + poly_acc_034.r.w0.d0, - d1=c_i * r_w.w0.d1 + poly_acc_034.r.w0.d1, - d2=c_i * r_w.w0.d2 + poly_acc_034.r.w0.d2, - ), - UnreducedBigInt3( - d0=c_i * r_w.w1.d0 + poly_acc_034.r.w1.d0, - d1=c_i * r_w.w1.d1 + poly_acc_034.r.w1.d1, - d2=c_i * r_w.w1.d2 + poly_acc_034.r.w1.d2, - ), - UnreducedBigInt3( - d0=c_i * r_w.w2.d0 + poly_acc_034.r.w2.d0, - d1=c_i * r_w.w2.d1 + poly_acc_034.r.w2.d1, - d2=c_i * r_w.w2.d2 + poly_acc_034.r.w2.d2, - ), - UnreducedBigInt3( - d0=c_i * r_w.w3.d0 + poly_acc_034.r.w3.d0, - d1=c_i * r_w.w3.d1 + poly_acc_034.r.w3.d1, - d2=c_i * r_w.w3.d2 + poly_acc_034.r.w3.d2, - ), - UnreducedBigInt3( - d0=c_i * r_w.w4.d0 + poly_acc_034.r.w4.d0, - d1=c_i * r_w.w4.d1 + poly_acc_034.r.w4.d1, - d2=c_i * r_w.w4.d2 + poly_acc_034.r.w4.d2, - ), - UnreducedBigInt3( - d0=c_i * r_w.w5.d0 + poly_acc_034.r.w5.d0, - d1=c_i * r_w.w5.d1 + poly_acc_034.r.w5.d1, - d2=c_i * r_w.w5.d2 + poly_acc_034.r.w5.d2, - ), - UnreducedBigInt3( - d0=c_i * r_w.w6.d0 + poly_acc_034.r.w6.d0, - d1=c_i * r_w.w6.d1 + poly_acc_034.r.w6.d1, - d2=c_i * r_w.w6.d2 + poly_acc_034.r.w6.d2, - ), - UnreducedBigInt3( - d0=c_i * r_w.w7.d0 + poly_acc_034.r.w7.d0, - d1=c_i * r_w.w7.d1 + poly_acc_034.r.w7.d1, - d2=c_i * r_w.w7.d2 + poly_acc_034.r.w7.d2, - ), - UnreducedBigInt3( - d0=c_i * r_w.w8.d0 + poly_acc_034.r.w8.d0, - d1=c_i * r_w.w8.d1 + poly_acc_034.r.w8.d1, - d2=c_i * r_w.w8.d2 + poly_acc_034.r.w8.d2, - ), - UnreducedBigInt3( - d0=c_i * r_w.w9.d0 + poly_acc_034.r.w9.d0, - d1=c_i * 
r_w.w9.d1 + poly_acc_034.r.w9.d1, - d2=c_i * r_w.w9.d2 + poly_acc_034.r.w9.d2, - ), - UnreducedBigInt3( - d0=c_i * r_w.w10.d0 + poly_acc_034.r.w10.d0, - d1=c_i * r_w.w10.d1 + poly_acc_034.r.w10.d1, - d2=c_i * r_w.w10.d2 + poly_acc_034.r.w10.d2, - ), - UnreducedBigInt3( - d0=c_i * r_w.w11.d0 + poly_acc_034.r.w11.d0, - d1=c_i * r_w.w11.d1 + poly_acc_034.r.w11.d1, - d2=c_i * r_w.w11.d2 + poly_acc_034.r.w11.d2, - ), - ), - ); - let poly_acc_034 = &poly_acc_034_f; - return &r_w; - } - - func mul034_034{ - range_check_ptr, - bitwise_ptr: BitwiseBuiltin*, - poseidon_ptr: PoseidonBuiltin*, - z_pow1_11_ptr: ZPowers11*, - continuable_hash: felt, - poly_acc_034034: PolyAcc034034*, - }(x_ptr: E12full034*, y_ptr: E12full034*) -> E12full01234* { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - local x: E12full034 = [x_ptr]; - local y: E12full034 = [y_ptr]; - local z_pow1_11: ZPowers11 = [z_pow1_11_ptr]; - local r_w: E12full01234; - local q_w: E7full; - - // w5 = 0 - - %{ - from tools.py.polynomial import Polynomial - from tools.py.field import BaseFieldElement, BaseField - from tools.py.extension_trick import w_to_gnark, gnark_to_w, flatten, pack_e12, mul_e12_gnark - from starkware.cairo.common.cairo_secp.secp_utils import split - from src.bn254.curve import IRREDUCIBLE_POLY_12 - from tools.make.utils import split_128 - - p=0x30644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd47 - field = BaseField(p) - x=[1]+11*[0] - y=[1]+11*[0] - x_refs=[(1,ids.x.w1), (3,ids.x.w3), (7,ids.x.w7), (9,ids.x.w9)] - y_refs=[(1,ids.y.w1), (3,ids.y.w3), (7,ids.y.w7), (9,ids.y.w9)] - for i in range(ids.N_LIMBS): - for index, ref in y_refs: - y[index]+=as_int(getattr(ref, 'd'+str(i)), PRIME) * ids.BASE**i - for index, ref in x_refs: - x[index]+=as_int(getattr(ref, 'd'+str(i)), PRIME) * ids.BASE**i - x_gnark=w_to_gnark(x) - y_gnark=w_to_gnark(y) - #print(f"Y_Gnark: {y_gnark}") - #print(f"Y_034034: {y}") - x_poly=Polynomial([BaseFieldElement(x[i], field) for i in range(12)]) - y_poly=Polynomial([BaseFieldElement(y[i], field) for i in range(12)]) - z_poly=x_poly*y_poly - z_polyr=z_poly % IRREDUCIBLE_POLY_12 - z_polyq=z_poly // IRREDUCIBLE_POLY_12 - z_polyr_coeffs = z_polyr.get_coeffs() - z_polyq_coeffs = z_polyq.get_coeffs() - assert len(z_polyq_coeffs)<=7, f"len z_polyq_coeffs: {len(z_polyq_coeffs)}, degree: {z_polyq.degree()}" - assert len(z_polyr_coeffs)<=12, f"len z_polyr_coeffs: {z_polyr_coeffs}, degree: {z_polyr.degree()}" - assert z_polyr_coeffs[5]==0, f"Not a 01234" - # extend z_polyq with 0 to make it len 9: - z_polyq_coeffs = z_polyq_coeffs + (7-len(z_polyq_coeffs))*[0] - # extend z_polyr with 0 to make it len 12: - z_polyr_coeffs = z_polyr_coeffs + (12-len(z_polyr_coeffs))*[0] - expected = flatten(mul_e12_gnark(pack_e12(x_gnark), pack_e12(y_gnark))) - assert expected==w_to_gnark(z_polyr_coeffs), f"expected: {expected}, got: {w_to_gnark(z_polyr_coeffs)}" - #print(f"Z_PolyR: {z_polyr_coeffs}") - #print(f"Z_PolyR_to_gnark: {w_to_gnark(z_polyr_coeffs)}") - for i in range(12): - if i==5: - continue - val = split(z_polyr_coeffs[i]%p) - for k in range(3): - rsetattr(ids.r_w, f'w{i}.d{k}', val[k]) - for i in range(7): - val = split_128(z_polyq_coeffs[i]%p) - rsetattr(ids.q_w, f'w{i}.low', val[0]) - rsetattr(ids.q_w, f'w{i}.high', val[1]) - %} - assert [range_check_ptr + 0] = r_w.w0.d0; - assert [range_check_ptr + 1] = r_w.w0.d1; - assert [range_check_ptr + 2] = r_w.w0.d2; - assert [range_check_ptr + 3] = r_w.w1.d0; - assert [range_check_ptr + 4] = r_w.w1.d1; - assert [range_check_ptr + 5] = r_w.w1.d2; - 
assert [range_check_ptr + 6] = r_w.w2.d0; - assert [range_check_ptr + 7] = r_w.w2.d1; - assert [range_check_ptr + 8] = r_w.w2.d2; - assert [range_check_ptr + 9] = r_w.w3.d0; - assert [range_check_ptr + 10] = r_w.w3.d1; - assert [range_check_ptr + 11] = r_w.w3.d2; - assert [range_check_ptr + 12] = r_w.w4.d0; - assert [range_check_ptr + 13] = r_w.w4.d1; - assert [range_check_ptr + 14] = r_w.w4.d2; - assert [range_check_ptr + 15] = r_w.w6.d0; - assert [range_check_ptr + 16] = r_w.w6.d1; - assert [range_check_ptr + 17] = r_w.w6.d2; - assert [range_check_ptr + 18] = r_w.w7.d0; - assert [range_check_ptr + 19] = r_w.w7.d1; - assert [range_check_ptr + 20] = r_w.w7.d2; - assert [range_check_ptr + 21] = r_w.w8.d0; - assert [range_check_ptr + 22] = r_w.w8.d1; - assert [range_check_ptr + 23] = r_w.w8.d2; - assert [range_check_ptr + 24] = r_w.w9.d0; - assert [range_check_ptr + 25] = r_w.w9.d1; - assert [range_check_ptr + 26] = r_w.w9.d2; - assert [range_check_ptr + 27] = r_w.w10.d0; - assert [range_check_ptr + 28] = r_w.w10.d1; - assert [range_check_ptr + 29] = r_w.w10.d2; - assert [range_check_ptr + 30] = r_w.w11.d0; - assert [range_check_ptr + 31] = r_w.w11.d1; - assert [range_check_ptr + 32] = r_w.w11.d2; - assert [range_check_ptr + 33] = q_w.w0.low; - assert [range_check_ptr + 34] = q_w.w0.high; - assert [range_check_ptr + 35] = q_w.w1.low; - assert [range_check_ptr + 36] = q_w.w1.high; - assert [range_check_ptr + 37] = q_w.w2.low; - assert [range_check_ptr + 38] = q_w.w2.high; - assert [range_check_ptr + 39] = q_w.w3.low; - assert [range_check_ptr + 40] = q_w.w3.high; - assert [range_check_ptr + 41] = q_w.w4.low; - assert [range_check_ptr + 42] = q_w.w4.high; - assert [range_check_ptr + 43] = q_w.w5.low; - assert [range_check_ptr + 44] = q_w.w5.high; - assert [range_check_ptr + 45] = q_w.w6.low; - assert [range_check_ptr + 46] = q_w.w6.high; - assert [range_check_ptr + 47] = 11 * 3 * BASE_MIN_1 - ( - r_w.w0.d0 + - r_w.w0.d1 + - r_w.w0.d2 + - r_w.w1.d0 + - r_w.w1.d1 + - r_w.w1.d2 + - r_w.w2.d0 + - r_w.w2.d1 + - r_w.w2.d2 + - r_w.w3.d0 + - r_w.w3.d1 + - r_w.w3.d2 + - r_w.w4.d0 + - r_w.w4.d1 + - r_w.w4.d2 + - r_w.w6.d0 + - r_w.w6.d1 + - r_w.w6.d2 + - r_w.w7.d0 + - r_w.w7.d1 + - r_w.w7.d2 + - r_w.w8.d0 + - r_w.w8.d1 + - r_w.w8.d2 + - r_w.w9.d0 + - r_w.w9.d1 + - r_w.w9.d2 + - r_w.w10.d0 + - r_w.w10.d1 + - r_w.w10.d2 + - r_w.w11.d0 + - r_w.w11.d1 + - r_w.w11.d2 - ); - - tempvar range_check_ptr = range_check_ptr + 48; - - tempvar two = 2; - assert poseidon_ptr.input = PoseidonBuiltinState( - s0=x.w1.d0 * x.w1.d1, s1=continuable_hash, s2=two - ); - assert poseidon_ptr[1].input = PoseidonBuiltinState( - s0=x.w1.d2 * x.w3.d0, s1=poseidon_ptr[0].output.s0, s2=two - ); - assert poseidon_ptr[2].input = PoseidonBuiltinState( - s0=x.w3.d1 * x.w3.d2, s1=poseidon_ptr[1].output.s0, s2=two - ); - assert poseidon_ptr[3].input = PoseidonBuiltinState( - s0=x.w7.d0 * x.w7.d1, s1=poseidon_ptr[2].output.s0, s2=two - ); - assert poseidon_ptr[4].input = PoseidonBuiltinState( - s0=x.w7.d2 * x.w9.d0, s1=poseidon_ptr[3].output.s0, s2=two - ); - assert poseidon_ptr[5].input = PoseidonBuiltinState( - s0=x.w9.d1 * x.w9.d2, s1=poseidon_ptr[4].output.s0, s2=two - ); - assert poseidon_ptr[6].input = PoseidonBuiltinState( - s0=y.w1.d0 * y.w1.d1, s1=poseidon_ptr[5].output.s0, s2=two - ); - assert poseidon_ptr[7].input = PoseidonBuiltinState( - s0=y.w1.d2 * y.w3.d0, s1=poseidon_ptr[6].output.s0, s2=two - ); - assert poseidon_ptr[8].input = PoseidonBuiltinState( - s0=y.w3.d1 * y.w3.d2, s1=poseidon_ptr[7].output.s0, s2=two - ); - assert 
poseidon_ptr[9].input = PoseidonBuiltinState( - s0=y.w7.d0 * y.w7.d1, s1=poseidon_ptr[8].output.s0, s2=two - ); - assert poseidon_ptr[10].input = PoseidonBuiltinState( - s0=y.w7.d2 * y.w9.d0, s1=poseidon_ptr[9].output.s0, s2=two - ); - assert poseidon_ptr[11].input = PoseidonBuiltinState( - s0=y.w9.d1 * y.w9.d2, s1=poseidon_ptr[10].output.s0, s2=two - ); - assert poseidon_ptr[12].input = PoseidonBuiltinState( - s0=q_w.w0.low * r_w.w0.d0, s1=poseidon_ptr[11].output.s0, s2=two - ); - assert poseidon_ptr[13].input = PoseidonBuiltinState( - s0=q_w.w0.high * r_w.w0.d1, s1=poseidon_ptr[12].output.s0, s2=two - ); - assert poseidon_ptr[14].input = PoseidonBuiltinState( - s0=q_w.w1.low * r_w.w0.d2, s1=poseidon_ptr[13].output.s0, s2=two - ); - assert poseidon_ptr[15].input = PoseidonBuiltinState( - s0=q_w.w1.high * r_w.w1.d0, s1=poseidon_ptr[14].output.s0, s2=two - ); - assert poseidon_ptr[16].input = PoseidonBuiltinState( - s0=q_w.w2.low * r_w.w1.d1, s1=poseidon_ptr[15].output.s0, s2=two - ); - assert poseidon_ptr[17].input = PoseidonBuiltinState( - s0=q_w.w2.high * r_w.w1.d2, s1=poseidon_ptr[16].output.s0, s2=two - ); - assert poseidon_ptr[18].input = PoseidonBuiltinState( - s0=q_w.w3.low * r_w.w2.d0, s1=poseidon_ptr[17].output.s0, s2=two - ); - assert poseidon_ptr[19].input = PoseidonBuiltinState( - s0=q_w.w3.high * r_w.w2.d1, s1=poseidon_ptr[18].output.s0, s2=two - ); - assert poseidon_ptr[20].input = PoseidonBuiltinState( - s0=q_w.w4.low * r_w.w2.d2, s1=poseidon_ptr[19].output.s0, s2=two - ); - assert poseidon_ptr[21].input = PoseidonBuiltinState( - s0=q_w.w4.high * r_w.w3.d0, s1=poseidon_ptr[20].output.s0, s2=two - ); - assert poseidon_ptr[22].input = PoseidonBuiltinState( - s0=q_w.w5.low * r_w.w3.d1, s1=poseidon_ptr[21].output.s0, s2=two - ); - assert poseidon_ptr[23].input = PoseidonBuiltinState( - s0=q_w.w5.high * r_w.w3.d2, s1=poseidon_ptr[22].output.s0, s2=two - ); - assert poseidon_ptr[24].input = PoseidonBuiltinState( - s0=q_w.w6.low * r_w.w4.d0, s1=poseidon_ptr[23].output.s0, s2=two - ); - assert poseidon_ptr[25].input = PoseidonBuiltinState( - s0=q_w.w6.high * r_w.w4.d1, s1=poseidon_ptr[24].output.s0, s2=two - ); - assert poseidon_ptr[26].input = PoseidonBuiltinState( - s0=r_w.w4.d2 * r_w.w6.d0, s1=poseidon_ptr[25].output.s0, s2=two - ); - assert poseidon_ptr[27].input = PoseidonBuiltinState( - s0=r_w.w6.d1 * r_w.w6.d2, s1=poseidon_ptr[26].output.s0, s2=two - ); - assert poseidon_ptr[28].input = PoseidonBuiltinState( - s0=r_w.w7.d0 * r_w.w7.d1, s1=poseidon_ptr[27].output.s0, s2=two - ); - assert poseidon_ptr[29].input = PoseidonBuiltinState( - s0=r_w.w7.d2 * r_w.w8.d0, s1=poseidon_ptr[28].output.s0, s2=two - ); - assert poseidon_ptr[30].input = PoseidonBuiltinState( - s0=r_w.w8.d1 * r_w.w8.d2, s1=poseidon_ptr[29].output.s0, s2=two - ); - assert poseidon_ptr[31].input = PoseidonBuiltinState( - s0=r_w.w9.d0 * r_w.w9.d1, s1=poseidon_ptr[30].output.s0, s2=two - ); - assert poseidon_ptr[32].input = PoseidonBuiltinState( - s0=r_w.w9.d2 * r_w.w10.d0, s1=poseidon_ptr[31].output.s0, s2=two - ); - assert poseidon_ptr[33].input = PoseidonBuiltinState( - s0=r_w.w10.d1 * r_w.w10.d2, s1=poseidon_ptr[32].output.s0, s2=two - ); - assert poseidon_ptr[34].input = PoseidonBuiltinState( - s0=r_w.w11.d0 * r_w.w11.d1, s1=poseidon_ptr[33].output.s0, s2=two - ); - assert poseidon_ptr[35].input = PoseidonBuiltinState( - s0=r_w.w11.d2, s1=poseidon_ptr[34].output.s0, s2=two - ); - - tempvar x_of_z_w1 = UnreducedBigInt5( - d0=x.w1.d0 * z_pow1_11.z_1.d0, - d1=x.w1.d0 * z_pow1_11.z_1.d1 + x.w1.d1 * 
z_pow1_11.z_1.d0, - d2=x.w1.d0 * z_pow1_11.z_1.d2 + x.w1.d1 * z_pow1_11.z_1.d1 + x.w1.d2 * z_pow1_11.z_1.d0, - d3=x.w1.d1 * z_pow1_11.z_1.d2 + x.w1.d2 * z_pow1_11.z_1.d1, - d4=x.w1.d2 * z_pow1_11.z_1.d2, - ); - - tempvar x_of_z_w3 = UnreducedBigInt5( - d0=x.w3.d0 * z_pow1_11.z_3.d0, - d1=x.w3.d0 * z_pow1_11.z_3.d1 + x.w3.d1 * z_pow1_11.z_3.d0, - d2=x.w3.d0 * z_pow1_11.z_3.d2 + x.w3.d1 * z_pow1_11.z_3.d1 + x.w3.d2 * z_pow1_11.z_3.d0, - d3=x.w3.d1 * z_pow1_11.z_3.d2 + x.w3.d2 * z_pow1_11.z_3.d1, - d4=x.w3.d2 * z_pow1_11.z_3.d2, - ); - tempvar x_of_z_w7 = UnreducedBigInt5( - d0=x.w7.d0 * z_pow1_11.z_7.d0, - d1=x.w7.d0 * z_pow1_11.z_7.d1 + x.w7.d1 * z_pow1_11.z_7.d0, - d2=x.w7.d0 * z_pow1_11.z_7.d2 + x.w7.d1 * z_pow1_11.z_7.d1 + x.w7.d2 * z_pow1_11.z_7.d0, - d3=x.w7.d1 * z_pow1_11.z_7.d2 + x.w7.d2 * z_pow1_11.z_7.d1, - d4=x.w7.d2 * z_pow1_11.z_7.d2, - ); - tempvar x_of_z_w9 = UnreducedBigInt5( - d0=x.w9.d0 * z_pow1_11.z_9.d0, - d1=x.w9.d0 * z_pow1_11.z_9.d1 + x.w9.d1 * z_pow1_11.z_9.d0, - d2=x.w9.d0 * z_pow1_11.z_9.d2 + x.w9.d1 * z_pow1_11.z_9.d1 + x.w9.d2 * z_pow1_11.z_9.d0, - d3=x.w9.d1 * z_pow1_11.z_9.d2 + x.w9.d2 * z_pow1_11.z_9.d1, - d4=x.w9.d2 * z_pow1_11.z_9.d2, - ); - let x_of_z = reduce_5( - UnreducedBigInt5( - d0=1 + x_of_z_w1.d0 + x_of_z_w3.d0 + x_of_z_w7.d0 + x_of_z_w9.d0, - d1=x_of_z_w1.d1 + x_of_z_w3.d1 + x_of_z_w7.d1 + x_of_z_w9.d1, - d2=x_of_z_w1.d2 + x_of_z_w3.d2 + x_of_z_w7.d2 + x_of_z_w9.d2, - d3=x_of_z_w1.d3 + x_of_z_w3.d3 + x_of_z_w7.d3 + x_of_z_w9.d3, - d4=x_of_z_w1.d4 + x_of_z_w3.d4 + x_of_z_w7.d4 + x_of_z_w9.d4, - ), - ); - - tempvar y_of_z_w1 = UnreducedBigInt5( - d0=y.w1.d0 * z_pow1_11.z_1.d0, - d1=y.w1.d0 * z_pow1_11.z_1.d1 + y.w1.d1 * z_pow1_11.z_1.d0, - d2=y.w1.d0 * z_pow1_11.z_1.d2 + y.w1.d1 * z_pow1_11.z_1.d1 + y.w1.d2 * z_pow1_11.z_1.d0, - d3=y.w1.d1 * z_pow1_11.z_1.d2 + y.w1.d2 * z_pow1_11.z_1.d1, - d4=y.w1.d2 * z_pow1_11.z_1.d2, - ); - - tempvar y_of_z_w3 = UnreducedBigInt5( - d0=y.w3.d0 * z_pow1_11.z_3.d0, - d1=y.w3.d0 * z_pow1_11.z_3.d1 + y.w3.d1 * z_pow1_11.z_3.d0, - d2=y.w3.d0 * z_pow1_11.z_3.d2 + y.w3.d1 * z_pow1_11.z_3.d1 + y.w3.d2 * z_pow1_11.z_3.d0, - d3=y.w3.d1 * z_pow1_11.z_3.d2 + y.w3.d2 * z_pow1_11.z_3.d1, - d4=y.w3.d2 * z_pow1_11.z_3.d2, - ); - - tempvar y_of_z_w7 = UnreducedBigInt5( - d0=y.w7.d0 * z_pow1_11.z_7.d0, - d1=y.w7.d0 * z_pow1_11.z_7.d1 + y.w7.d1 * z_pow1_11.z_7.d0, - d2=y.w7.d0 * z_pow1_11.z_7.d2 + y.w7.d1 * z_pow1_11.z_7.d1 + y.w7.d2 * z_pow1_11.z_7.d0, - d3=y.w7.d1 * z_pow1_11.z_7.d2 + y.w7.d2 * z_pow1_11.z_7.d1, - d4=y.w7.d2 * z_pow1_11.z_7.d2, - ); - - tempvar y_of_z_w9 = UnreducedBigInt5( - d0=y.w9.d0 * z_pow1_11.z_9.d0, - d1=y.w9.d0 * z_pow1_11.z_9.d1 + y.w9.d1 * z_pow1_11.z_9.d0, - d2=y.w9.d0 * z_pow1_11.z_9.d2 + y.w9.d1 * z_pow1_11.z_9.d1 + y.w9.d2 * z_pow1_11.z_9.d0, - d3=y.w9.d1 * z_pow1_11.z_9.d2 + y.w9.d2 * z_pow1_11.z_9.d1, - d4=y.w9.d2 * z_pow1_11.z_9.d2, - ); - - let y_of_z = reduce_5( - UnreducedBigInt5( - d0=1 + y_of_z_w1.d0 + y_of_z_w3.d0 + y_of_z_w7.d0 + y_of_z_w9.d0, - d1=y_of_z_w1.d1 + y_of_z_w3.d1 + y_of_z_w7.d1 + y_of_z_w9.d1, - d2=y_of_z_w1.d2 + y_of_z_w3.d2 + y_of_z_w7.d2 + y_of_z_w9.d2, - d3=y_of_z_w1.d3 + y_of_z_w3.d3 + y_of_z_w7.d3 + y_of_z_w9.d3, - d4=y_of_z_w1.d4 + y_of_z_w3.d4 + y_of_z_w7.d4 + y_of_z_w9.d4, - ), - ); - - let xy_acc = reduce_5( - UnreducedBigInt5( - d0=x_of_z.d0 * y_of_z.d0, - d1=x_of_z.d0 * y_of_z.d1 + x_of_z.d1 * y_of_z.d0, - d2=x_of_z.d0 * y_of_z.d2 + x_of_z.d1 * y_of_z.d1 + x_of_z.d2 * y_of_z.d0, - d3=x_of_z.d1 * y_of_z.d2 + x_of_z.d2 * y_of_z.d1, - d4=x_of_z.d2 * y_of_z.d2, - ), - ); 
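The xy_acc just computed is x(z)*y(z) reduced mod p; the code that follows folds it, together with q_w and r_w, into the PolyAcc034034 accumulator weighted by a coefficient c_i carved out of the running Poseidon hash (masked with BASE_MIN_1). The point of the PolyAcc structs is that one combined identity checked after the last multiplication covers the whole batch. A short sketch of that batching, reusing p, IRR, eval_at and mul_e12 from the earlier sketch; the Poseidon-derived c_i is replaced by a stand-in random value, and the closing check is assumed to be run once at the end.

# Sketch of the PolyAcc-style batching: accumulate c_i-weighted (x(z)*y(z), q, r)
# and verify a single combined identity at the end.
# Assumption: c stands in for the coefficient the Cairo code derives from the Poseidon
# transcript (which binds x, y, q_w and r_w before c_i is used).
import secrets

def accumulate_and_check(pairs, z):
    # pairs: list of (x_coeffs, y_coeffs) multiplications verified as one batch.
    lhs_acc, q_acc, r_acc = 0, [0] * 11, [0] * 12
    for x, y in pairs:
        q, r = mul_e12(x, y, z)            # hint-side quotient / remainder
        c = secrets.randbelow(2**86)       # stand-in for the BASE_MIN_1-masked hash (BASE assumed 2**86)
        lhs_acc = (lhs_acc + c * eval_at(x, z) * eval_at(y, z)) % p
        q_acc = [(a + c * b) % p for a, b in zip(q_acc, q + [0] * (11 - len(q)))]
        r_acc = [(a + c * b) % p for a, b in zip(r_acc, r + [0] * (12 - len(r)))]
    # One modular identity replaces a full reduction per multiplication:
    assert lhs_acc == (eval_at(q_acc, z) * eval_at(IRR, z) + eval_at(r_acc, z)) % p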
- - let poseidon_ptr = poseidon_ptr + 36 * PoseidonBuiltin.SIZE; - let continuable_hash = [poseidon_ptr - PoseidonBuiltin.SIZE].output.s0; - let random_linear_combination_coeff = [poseidon_ptr - PoseidonBuiltin.SIZE].output.s1; - - assert bitwise_ptr.x = random_linear_combination_coeff; - assert bitwise_ptr.y = BASE_MIN_1; - tempvar c_i = bitwise_ptr.x_and_y; - let bitwise_ptr = bitwise_ptr + BitwiseBuiltin.SIZE; - - local poly_acc_034034_f: PolyAcc034034 = PolyAcc034034( - xy=UnreducedBigInt3( - d0=poly_acc_034034.xy.d0 + c_i * xy_acc.d0, - d1=poly_acc_034034.xy.d1 + c_i * xy_acc.d1, - d2=poly_acc_034034.xy.d2 + c_i * xy_acc.d2, - ), - q=E7full( - Uint256( - c_i * q_w.w0.low + poly_acc_034034.q.w0.low, - c_i * q_w.w0.high + poly_acc_034034.q.w0.high, - ), - Uint256( - c_i * q_w.w1.low + poly_acc_034034.q.w1.low, - c_i * q_w.w1.high + poly_acc_034034.q.w1.high, - ), - Uint256( - c_i * q_w.w2.low + poly_acc_034034.q.w2.low, - c_i * q_w.w2.high + poly_acc_034034.q.w2.high, - ), - Uint256( - c_i * q_w.w3.low + poly_acc_034034.q.w3.low, - c_i * q_w.w3.high + poly_acc_034034.q.w3.high, - ), - Uint256( - c_i * q_w.w4.low + poly_acc_034034.q.w4.low, - c_i * q_w.w4.high + poly_acc_034034.q.w4.high, - ), - Uint256( - c_i * q_w.w5.low + poly_acc_034034.q.w5.low, - c_i * q_w.w5.high + poly_acc_034034.q.w5.high, - ), - Uint256( - c_i * q_w.w6.low + poly_acc_034034.q.w6.low, - c_i * q_w.w6.high + poly_acc_034034.q.w6.high, - ), - ), - r=E12full01234( - w0=BigInt3( - d0=c_i * r_w.w0.d0 + poly_acc_034034.r.w0.d0, - d1=c_i * r_w.w0.d1 + poly_acc_034034.r.w0.d1, - d2=c_i * r_w.w0.d2 + poly_acc_034034.r.w0.d2, - ), - w1=BigInt3( - d0=c_i * r_w.w1.d0 + poly_acc_034034.r.w1.d0, - d1=c_i * r_w.w1.d1 + poly_acc_034034.r.w1.d1, - d2=c_i * r_w.w1.d2 + poly_acc_034034.r.w1.d2, - ), - w2=BigInt3( - d0=c_i * r_w.w2.d0 + poly_acc_034034.r.w2.d0, - d1=c_i * r_w.w2.d1 + poly_acc_034034.r.w2.d1, - d2=c_i * r_w.w2.d2 + poly_acc_034034.r.w2.d2, - ), - w3=BigInt3( - d0=c_i * r_w.w3.d0 + poly_acc_034034.r.w3.d0, - d1=c_i * r_w.w3.d1 + poly_acc_034034.r.w3.d1, - d2=c_i * r_w.w3.d2 + poly_acc_034034.r.w3.d2, - ), - w4=BigInt3( - d0=c_i * r_w.w4.d0 + poly_acc_034034.r.w4.d0, - d1=c_i * r_w.w4.d1 + poly_acc_034034.r.w4.d1, - d2=c_i * r_w.w4.d2 + poly_acc_034034.r.w4.d2, - ), - w6=BigInt3( - d0=c_i * r_w.w6.d0 + poly_acc_034034.r.w6.d0, - d1=c_i * r_w.w6.d1 + poly_acc_034034.r.w6.d1, - d2=c_i * r_w.w6.d2 + poly_acc_034034.r.w6.d2, - ), - w7=BigInt3( - d0=c_i * r_w.w7.d0 + poly_acc_034034.r.w7.d0, - d1=c_i * r_w.w7.d1 + poly_acc_034034.r.w7.d1, - d2=c_i * r_w.w7.d2 + poly_acc_034034.r.w7.d2, - ), - w8=BigInt3( - d0=c_i * r_w.w8.d0 + poly_acc_034034.r.w8.d0, - d1=c_i * r_w.w8.d1 + poly_acc_034034.r.w8.d1, - d2=c_i * r_w.w8.d2 + poly_acc_034034.r.w8.d2, - ), - w9=BigInt3( - d0=c_i * r_w.w9.d0 + poly_acc_034034.r.w9.d0, - d1=c_i * r_w.w9.d1 + poly_acc_034034.r.w9.d1, - d2=c_i * r_w.w9.d2 + poly_acc_034034.r.w9.d2, - ), - w10=BigInt3( - d0=c_i * r_w.w10.d0 + poly_acc_034034.r.w10.d0, - d1=c_i * r_w.w10.d1 + poly_acc_034034.r.w10.d1, - d2=c_i * r_w.w10.d2 + poly_acc_034034.r.w10.d2, - ), - w11=BigInt3( - d0=c_i * r_w.w11.d0 + poly_acc_034034.r.w11.d0, - d1=c_i * r_w.w11.d1 + poly_acc_034034.r.w11.d1, - d2=c_i * r_w.w11.d2 + poly_acc_034034.r.w11.d2, - ), - ), - ); - - let poly_acc_034034 = &poly_acc_034034_f; - - return &r_w; - } - - func mul01234{ - range_check_ptr, - bitwise_ptr: BitwiseBuiltin*, - poseidon_ptr: PoseidonBuiltin*, - z_pow1_11_ptr: ZPowers11*, - continuable_hash: felt, - poly_acc_12: PolyAcc12*, - }(x_ptr: E12D*, 
y_ptr: E12full01234*) -> E12D* { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - local x: E12D = [x_ptr]; - local y: E12full01234 = [y_ptr]; - local z_pow1_11: ZPowers11 = [z_pow1_11_ptr]; - local r_w: E12D; - local q_w: E11DU; - - %{ - from tools.py.polynomial import Polynomial - from tools.py.field import BaseFieldElement, BaseField - from starkware.cairo.common.cairo_secp.secp_utils import split - from tools.make.utils import split_128 - from src.bn254.curve import IRREDUCIBLE_POLY_12 - - p=0x30644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd47 - field = BaseField(p) - x=12*[0] - y=12*[0] - x_refs = [ids.x.w0, ids.x.w1, ids.x.w2, ids.x.w3, ids.x.w4, ids.x.w5, ids.x.w6, ids.x.w7, ids.x.w8, ids.x.w9, ids.x.w10, ids.x.w11] - y_refs = [ids.y.w0, ids.y.w1, ids.y.w2, ids.y.w3, ids.y.w4, None, ids.y.w6, ids.y.w7, ids.y.w8, ids.y.w9, ids.y.w10, ids.y.w11] - for i in range(ids.N_LIMBS): - for k in range(12): - x[k]+=as_int(getattr(x_refs[k], 'd'+str(i)), PRIME) * ids.BASE**i - for k in range(12): - if k==5: - continue - y[k]+=as_int(getattr(y_refs[k], 'd'+str(i)), PRIME) * ids.BASE**i - x_poly=Polynomial([BaseFieldElement(x[i], field) for i in range(12)]) - y_poly=Polynomial([BaseFieldElement(y[i], field) for i in range(12)]) - z_poly=x_poly*y_poly - z_polyr=z_poly % IRREDUCIBLE_POLY_12 - z_polyq=z_poly // IRREDUCIBLE_POLY_12 - z_polyr_coeffs = z_polyr.get_coeffs() - z_polyq_coeffs = z_polyq.get_coeffs() - assert len(z_polyq_coeffs)<=11, f"len z_polyq_coeffs: {len(z_polyq_coeffs)}, degree: {z_polyq.degree()}" - assert len(z_polyr_coeffs)<=12, f"len z_polyr_coeffs: {z_polyr_coeffs}, degree: {z_polyr.degree()}" - #print(f"Z_PolyR034034: {z_polyr_coeffs}") - # extend z_polyq with 0 to make it len 9: - z_polyq_coeffs = z_polyq_coeffs + (11-len(z_polyq_coeffs))*[0] - # extend z_polyr with 0 to make it len 12: - z_polyr_coeffs = z_polyr_coeffs + (12-len(z_polyr_coeffs))*[0] - #expected = flatten(mul_e12_gnark(pack_e12(x_gnark), pack_e12(y_gnark))) - #assert expected==w_to_gnark(z_polyr_coeffs) - #print(f"Z_PolyR: {z_polyr_coeffs}") - #print(f"Z_PolyR_to_gnark: {w_to_gnark(z_polyr_coeffs)}") - fill_e12d(z_polyr_coeffs, ids.r_w, ids.N_LIMBS, ids.BASE) - for i in range(11): - fill_uint256(z_polyq_coeffs[i], getattr(ids.q_w, 'w'+str(i))) - %} - assert [range_check_ptr + 0] = r_w.w0.d0; - assert [range_check_ptr + 1] = r_w.w0.d1; - assert [range_check_ptr + 2] = r_w.w0.d2; - assert [range_check_ptr + 3] = r_w.w1.d0; - assert [range_check_ptr + 4] = r_w.w1.d1; - assert [range_check_ptr + 5] = r_w.w1.d2; - assert [range_check_ptr + 6] = r_w.w2.d0; - assert [range_check_ptr + 7] = r_w.w2.d1; - assert [range_check_ptr + 8] = r_w.w2.d2; - assert [range_check_ptr + 9] = r_w.w3.d0; - assert [range_check_ptr + 10] = r_w.w3.d1; - assert [range_check_ptr + 11] = r_w.w3.d2; - assert [range_check_ptr + 12] = r_w.w4.d0; - assert [range_check_ptr + 13] = r_w.w4.d1; - assert [range_check_ptr + 14] = r_w.w4.d2; - assert [range_check_ptr + 15] = r_w.w5.d0; - assert [range_check_ptr + 16] = r_w.w5.d1; - assert [range_check_ptr + 17] = r_w.w5.d2; - assert [range_check_ptr + 18] = r_w.w6.d0; - assert [range_check_ptr + 19] = r_w.w6.d1; - assert [range_check_ptr + 20] = r_w.w6.d2; - assert [range_check_ptr + 21] = r_w.w7.d0; - assert [range_check_ptr + 22] = r_w.w7.d1; - assert [range_check_ptr + 23] = r_w.w7.d2; - assert [range_check_ptr + 24] = r_w.w8.d0; - assert [range_check_ptr + 25] = r_w.w8.d1; - assert [range_check_ptr + 26] = r_w.w8.d2; - assert [range_check_ptr + 27] = r_w.w9.d0; - assert 
[range_check_ptr + 28] = r_w.w9.d1; - assert [range_check_ptr + 29] = r_w.w9.d2; - assert [range_check_ptr + 30] = r_w.w10.d0; - assert [range_check_ptr + 31] = r_w.w10.d1; - assert [range_check_ptr + 32] = r_w.w10.d2; - assert [range_check_ptr + 33] = r_w.w11.d0; - assert [range_check_ptr + 34] = r_w.w11.d1; - assert [range_check_ptr + 35] = r_w.w11.d2; - assert [range_check_ptr + 36] = q_w.w0.low; - assert [range_check_ptr + 37] = q_w.w0.high; - assert [range_check_ptr + 38] = q_w.w1.low; - assert [range_check_ptr + 39] = q_w.w1.high; - assert [range_check_ptr + 40] = q_w.w2.low; - assert [range_check_ptr + 41] = q_w.w2.high; - assert [range_check_ptr + 42] = q_w.w3.low; - assert [range_check_ptr + 43] = q_w.w3.high; - assert [range_check_ptr + 44] = q_w.w4.low; - assert [range_check_ptr + 45] = q_w.w4.high; - assert [range_check_ptr + 46] = q_w.w5.low; - assert [range_check_ptr + 47] = q_w.w5.high; - assert [range_check_ptr + 48] = q_w.w6.low; - assert [range_check_ptr + 49] = q_w.w6.high; - assert [range_check_ptr + 50] = q_w.w7.low; - assert [range_check_ptr + 51] = q_w.w7.high; - assert [range_check_ptr + 52] = q_w.w8.low; - assert [range_check_ptr + 53] = q_w.w8.high; - assert [range_check_ptr + 54] = q_w.w9.low; - assert [range_check_ptr + 55] = q_w.w9.high; - assert [range_check_ptr + 56] = q_w.w10.low; - assert [range_check_ptr + 57] = q_w.w10.high; - assert [range_check_ptr + 58] = 12 * 3 * BASE_MIN_1 - ( - r_w.w0.d0 + - r_w.w0.d1 + - r_w.w0.d2 + - r_w.w1.d0 + - r_w.w1.d1 + - r_w.w1.d2 + - r_w.w2.d0 + - r_w.w2.d1 + - r_w.w2.d2 + - r_w.w3.d0 + - r_w.w3.d1 + - r_w.w3.d2 + - r_w.w4.d0 + - r_w.w4.d1 + - r_w.w4.d2 + - r_w.w5.d0 + - r_w.w5.d1 + - r_w.w5.d2 + - r_w.w6.d0 + - r_w.w6.d1 + - r_w.w6.d2 + - r_w.w7.d0 + - r_w.w7.d1 + - r_w.w7.d2 + - r_w.w8.d0 + - r_w.w8.d1 + - r_w.w8.d2 + - r_w.w9.d0 + - r_w.w9.d1 + - r_w.w9.d2 + - r_w.w10.d0 + - r_w.w10.d1 + - r_w.w10.d2 + - r_w.w11.d0 + - r_w.w11.d1 + - r_w.w11.d2 - ); - tempvar range_check_ptr = range_check_ptr + 59; - - tempvar two = 2; - assert poseidon_ptr.input = PoseidonBuiltinState( - s0=x.w0.d0 * x.w0.d1, s1=continuable_hash, s2=two - ); - assert poseidon_ptr[1].input = PoseidonBuiltinState( - s0=x.w0.d2 * x.w1.d0, s1=poseidon_ptr[0].output.s0, s2=two - ); - assert poseidon_ptr[2].input = PoseidonBuiltinState( - s0=x.w1.d1 * x.w1.d2, s1=poseidon_ptr[1].output.s0, s2=two - ); - assert poseidon_ptr[3].input = PoseidonBuiltinState( - s0=x.w2.d0 * x.w2.d1, s1=poseidon_ptr[2].output.s0, s2=two - ); - assert poseidon_ptr[4].input = PoseidonBuiltinState( - s0=x.w2.d2 * x.w3.d0, s1=poseidon_ptr[3].output.s0, s2=two - ); - assert poseidon_ptr[5].input = PoseidonBuiltinState( - s0=x.w3.d1 * x.w3.d2, s1=poseidon_ptr[4].output.s0, s2=two - ); - assert poseidon_ptr[6].input = PoseidonBuiltinState( - s0=x.w4.d0 * x.w4.d1, s1=poseidon_ptr[5].output.s0, s2=two - ); - assert poseidon_ptr[7].input = PoseidonBuiltinState( - s0=x.w4.d2 * x.w5.d0, s1=poseidon_ptr[6].output.s0, s2=two - ); - assert poseidon_ptr[8].input = PoseidonBuiltinState( - s0=x.w5.d1 * x.w5.d2, s1=poseidon_ptr[7].output.s0, s2=two - ); - assert poseidon_ptr[9].input = PoseidonBuiltinState( - s0=x.w6.d0 * x.w6.d1, s1=poseidon_ptr[8].output.s0, s2=two - ); - assert poseidon_ptr[10].input = PoseidonBuiltinState( - s0=x.w6.d2 * x.w7.d0, s1=poseidon_ptr[9].output.s0, s2=two - ); - assert poseidon_ptr[11].input = PoseidonBuiltinState( - s0=x.w7.d1 * x.w7.d2, s1=poseidon_ptr[10].output.s0, s2=two - ); - assert poseidon_ptr[12].input = PoseidonBuiltinState( - s0=x.w8.d0 * x.w8.d1, 
s1=poseidon_ptr[11].output.s0, s2=two - ); - assert poseidon_ptr[13].input = PoseidonBuiltinState( - s0=x.w8.d2 * x.w9.d0, s1=poseidon_ptr[12].output.s0, s2=two - ); - assert poseidon_ptr[14].input = PoseidonBuiltinState( - s0=x.w9.d1 * x.w9.d2, s1=poseidon_ptr[13].output.s0, s2=two - ); - assert poseidon_ptr[15].input = PoseidonBuiltinState( - s0=x.w10.d0 * x.w10.d1, s1=poseidon_ptr[14].output.s0, s2=two - ); - assert poseidon_ptr[16].input = PoseidonBuiltinState( - s0=x.w10.d2 * x.w11.d0, s1=poseidon_ptr[15].output.s0, s2=two - ); - assert poseidon_ptr[17].input = PoseidonBuiltinState( - s0=x.w11.d1 * x.w11.d2, s1=poseidon_ptr[16].output.s0, s2=two - ); - assert poseidon_ptr[18].input = PoseidonBuiltinState( - s0=y.w0.d0 * y.w0.d1, s1=poseidon_ptr[17].output.s0, s2=two - ); - assert poseidon_ptr[19].input = PoseidonBuiltinState( - s0=y.w0.d2 * y.w1.d0, s1=poseidon_ptr[18].output.s0, s2=two - ); - assert poseidon_ptr[20].input = PoseidonBuiltinState( - s0=y.w1.d1 * y.w1.d2, s1=poseidon_ptr[19].output.s0, s2=two - ); - assert poseidon_ptr[21].input = PoseidonBuiltinState( - s0=y.w2.d0 * y.w2.d1, s1=poseidon_ptr[20].output.s0, s2=two - ); - assert poseidon_ptr[22].input = PoseidonBuiltinState( - s0=y.w2.d2 * y.w3.d0, s1=poseidon_ptr[21].output.s0, s2=two - ); - assert poseidon_ptr[23].input = PoseidonBuiltinState( - s0=y.w3.d1 * y.w3.d2, s1=poseidon_ptr[22].output.s0, s2=two - ); - assert poseidon_ptr[24].input = PoseidonBuiltinState( - s0=y.w4.d0 * y.w4.d1, s1=poseidon_ptr[23].output.s0, s2=two - ); - assert poseidon_ptr[25].input = PoseidonBuiltinState( - s0=y.w4.d2 * y.w6.d0, s1=poseidon_ptr[24].output.s0, s2=two - ); - assert poseidon_ptr[26].input = PoseidonBuiltinState( - s0=y.w6.d1 * y.w6.d2, s1=poseidon_ptr[25].output.s0, s2=two - ); - assert poseidon_ptr[27].input = PoseidonBuiltinState( - s0=y.w7.d0 * y.w7.d1, s1=poseidon_ptr[26].output.s0, s2=two - ); - assert poseidon_ptr[28].input = PoseidonBuiltinState( - s0=y.w7.d2 * y.w8.d0, s1=poseidon_ptr[27].output.s0, s2=two - ); - assert poseidon_ptr[29].input = PoseidonBuiltinState( - s0=y.w8.d1 * y.w8.d2, s1=poseidon_ptr[28].output.s0, s2=two - ); - assert poseidon_ptr[30].input = PoseidonBuiltinState( - s0=y.w9.d0 * y.w9.d1, s1=poseidon_ptr[29].output.s0, s2=two - ); - assert poseidon_ptr[31].input = PoseidonBuiltinState( - s0=y.w9.d2 * y.w10.d0, s1=poseidon_ptr[30].output.s0, s2=two - ); - assert poseidon_ptr[32].input = PoseidonBuiltinState( - s0=y.w10.d1 * y.w10.d2, s1=poseidon_ptr[31].output.s0, s2=two - ); - assert poseidon_ptr[33].input = PoseidonBuiltinState( - s0=y.w11.d0 * y.w11.d1, s1=poseidon_ptr[32].output.s0, s2=two - ); - assert poseidon_ptr[34].input = PoseidonBuiltinState( - s0=y.w11.d2, s1=poseidon_ptr[33].output.s0, s2=two - ); - assert poseidon_ptr[35].input = PoseidonBuiltinState( - s0=q_w.w0.low * r_w.w0.d0, s1=poseidon_ptr[34].output.s0, s2=two - ); - assert poseidon_ptr[36].input = PoseidonBuiltinState( - s0=q_w.w0.high * r_w.w0.d1, s1=poseidon_ptr[35].output.s0, s2=two - ); - assert poseidon_ptr[37].input = PoseidonBuiltinState( - s0=q_w.w1.low * r_w.w0.d2, s1=poseidon_ptr[36].output.s0, s2=two - ); - assert poseidon_ptr[38].input = PoseidonBuiltinState( - s0=q_w.w1.high * r_w.w1.d0, s1=poseidon_ptr[37].output.s0, s2=two - ); - assert poseidon_ptr[39].input = PoseidonBuiltinState( - s0=q_w.w2.low * r_w.w1.d1, s1=poseidon_ptr[38].output.s0, s2=two - ); - assert poseidon_ptr[40].input = PoseidonBuiltinState( - s0=q_w.w2.high * r_w.w1.d2, s1=poseidon_ptr[39].output.s0, s2=two - ); - assert poseidon_ptr[41].input 
= PoseidonBuiltinState( - s0=q_w.w3.low * r_w.w2.d0, s1=poseidon_ptr[40].output.s0, s2=two - ); - assert poseidon_ptr[42].input = PoseidonBuiltinState( - s0=q_w.w3.high * r_w.w2.d1, s1=poseidon_ptr[41].output.s0, s2=two - ); - assert poseidon_ptr[43].input = PoseidonBuiltinState( - s0=q_w.w4.low * r_w.w2.d2, s1=poseidon_ptr[42].output.s0, s2=two - ); - assert poseidon_ptr[44].input = PoseidonBuiltinState( - s0=q_w.w4.high * r_w.w3.d0, s1=poseidon_ptr[43].output.s0, s2=two - ); - assert poseidon_ptr[45].input = PoseidonBuiltinState( - s0=q_w.w5.low * r_w.w3.d1, s1=poseidon_ptr[44].output.s0, s2=two - ); - assert poseidon_ptr[46].input = PoseidonBuiltinState( - s0=q_w.w5.high * r_w.w3.d2, s1=poseidon_ptr[45].output.s0, s2=two - ); - assert poseidon_ptr[47].input = PoseidonBuiltinState( - s0=q_w.w6.low * r_w.w4.d0, s1=poseidon_ptr[46].output.s0, s2=two - ); - assert poseidon_ptr[48].input = PoseidonBuiltinState( - s0=q_w.w6.high * r_w.w4.d1, s1=poseidon_ptr[47].output.s0, s2=two - ); - assert poseidon_ptr[49].input = PoseidonBuiltinState( - s0=q_w.w7.low * r_w.w4.d2, s1=poseidon_ptr[48].output.s0, s2=two - ); - assert poseidon_ptr[50].input = PoseidonBuiltinState( - s0=q_w.w7.high * r_w.w5.d0, s1=poseidon_ptr[49].output.s0, s2=two - ); - assert poseidon_ptr[51].input = PoseidonBuiltinState( - s0=q_w.w8.low * r_w.w5.d1, s1=poseidon_ptr[50].output.s0, s2=two - ); - assert poseidon_ptr[52].input = PoseidonBuiltinState( - s0=q_w.w8.high * r_w.w5.d2, s1=poseidon_ptr[51].output.s0, s2=two - ); - assert poseidon_ptr[53].input = PoseidonBuiltinState( - s0=q_w.w9.low * r_w.w6.d0, s1=poseidon_ptr[52].output.s0, s2=two - ); - assert poseidon_ptr[54].input = PoseidonBuiltinState( - s0=q_w.w9.high * r_w.w6.d1, s1=poseidon_ptr[53].output.s0, s2=two - ); - assert poseidon_ptr[55].input = PoseidonBuiltinState( - s0=q_w.w10.low * r_w.w6.d2, s1=poseidon_ptr[54].output.s0, s2=two - ); - assert poseidon_ptr[56].input = PoseidonBuiltinState( - s0=q_w.w10.high * r_w.w7.d0, s1=poseidon_ptr[55].output.s0, s2=two - ); - assert poseidon_ptr[57].input = PoseidonBuiltinState( - s0=r_w.w7.d1 * r_w.w7.d2, s1=poseidon_ptr[56].output.s0, s2=two - ); - assert poseidon_ptr[58].input = PoseidonBuiltinState( - s0=r_w.w8.d0 * r_w.w8.d1, s1=poseidon_ptr[57].output.s0, s2=two - ); - assert poseidon_ptr[59].input = PoseidonBuiltinState( - s0=r_w.w8.d2 * r_w.w9.d0, s1=poseidon_ptr[58].output.s0, s2=two - ); - assert poseidon_ptr[60].input = PoseidonBuiltinState( - s0=r_w.w9.d1 * r_w.w9.d2, s1=poseidon_ptr[59].output.s0, s2=two - ); - assert poseidon_ptr[61].input = PoseidonBuiltinState( - s0=r_w.w10.d0 * r_w.w10.d1, s1=poseidon_ptr[60].output.s0, s2=two - ); - assert poseidon_ptr[62].input = PoseidonBuiltinState( - s0=r_w.w10.d2 * r_w.w11.d0, s1=poseidon_ptr[61].output.s0, s2=two - ); - assert poseidon_ptr[63].input = PoseidonBuiltinState( - s0=r_w.w11.d1 * r_w.w11.d2, s1=poseidon_ptr[62].output.s0, s2=two - ); - - tempvar x_of_z_w1 = UnreducedBigInt5( - d0=x.w1.d0 * z_pow1_11.z_1.d0, - d1=x.w1.d0 * z_pow1_11.z_1.d1 + x.w1.d1 * z_pow1_11.z_1.d0, - d2=x.w1.d0 * z_pow1_11.z_1.d2 + x.w1.d1 * z_pow1_11.z_1.d1 + x.w1.d2 * z_pow1_11.z_1.d0, - d3=x.w1.d1 * z_pow1_11.z_1.d2 + x.w1.d2 * z_pow1_11.z_1.d1, - d4=x.w1.d2 * z_pow1_11.z_1.d2, - ); - tempvar x_of_z_w2 = UnreducedBigInt5( - d0=x.w2.d0 * z_pow1_11.z_2.d0, - d1=x.w2.d0 * z_pow1_11.z_2.d1 + x.w2.d1 * z_pow1_11.z_2.d0, - d2=x.w2.d0 * z_pow1_11.z_2.d2 + x.w2.d1 * z_pow1_11.z_2.d1 + x.w2.d2 * z_pow1_11.z_2.d0, - d3=x.w2.d1 * z_pow1_11.z_2.d2 + x.w2.d2 * z_pow1_11.z_2.d1, - d4=x.w2.d2 * 
z_pow1_11.z_2.d2, - ); - - tempvar x_of_z_w3 = UnreducedBigInt5( - d0=x.w3.d0 * z_pow1_11.z_3.d0, - d1=x.w3.d0 * z_pow1_11.z_3.d1 + x.w3.d1 * z_pow1_11.z_3.d0, - d2=x.w3.d0 * z_pow1_11.z_3.d2 + x.w3.d1 * z_pow1_11.z_3.d1 + x.w3.d2 * z_pow1_11.z_3.d0, - d3=x.w3.d1 * z_pow1_11.z_3.d2 + x.w3.d2 * z_pow1_11.z_3.d1, - d4=x.w3.d2 * z_pow1_11.z_3.d2, - ); - - tempvar x_of_z_w4 = UnreducedBigInt5( - d0=x.w4.d0 * z_pow1_11.z_4.d0, - d1=x.w4.d0 * z_pow1_11.z_4.d1 + x.w4.d1 * z_pow1_11.z_4.d0, - d2=x.w4.d0 * z_pow1_11.z_4.d2 + x.w4.d1 * z_pow1_11.z_4.d1 + x.w4.d2 * z_pow1_11.z_4.d0, - d3=x.w4.d1 * z_pow1_11.z_4.d2 + x.w4.d2 * z_pow1_11.z_4.d1, - d4=x.w4.d2 * z_pow1_11.z_4.d2, - ); - - tempvar x_of_z_w5 = UnreducedBigInt5( - d0=x.w5.d0 * z_pow1_11.z_5.d0, - d1=x.w5.d0 * z_pow1_11.z_5.d1 + x.w5.d1 * z_pow1_11.z_5.d0, - d2=x.w5.d0 * z_pow1_11.z_5.d2 + x.w5.d1 * z_pow1_11.z_5.d1 + x.w5.d2 * z_pow1_11.z_5.d0, - d3=x.w5.d1 * z_pow1_11.z_5.d2 + x.w5.d2 * z_pow1_11.z_5.d1, - d4=x.w5.d2 * z_pow1_11.z_5.d2, - ); - - tempvar x_of_z_w6 = UnreducedBigInt5( - d0=x.w6.d0 * z_pow1_11.z_6.d0, - d1=x.w6.d0 * z_pow1_11.z_6.d1 + x.w6.d1 * z_pow1_11.z_6.d0, - d2=x.w6.d0 * z_pow1_11.z_6.d2 + x.w6.d1 * z_pow1_11.z_6.d1 + x.w6.d2 * z_pow1_11.z_6.d0, - d3=x.w6.d1 * z_pow1_11.z_6.d2 + x.w6.d2 * z_pow1_11.z_6.d1, - d4=x.w6.d2 * z_pow1_11.z_6.d2, - ); - - tempvar x_of_z_w7 = UnreducedBigInt5( - d0=x.w7.d0 * z_pow1_11.z_7.d0, - d1=x.w7.d0 * z_pow1_11.z_7.d1 + x.w7.d1 * z_pow1_11.z_7.d0, - d2=x.w7.d0 * z_pow1_11.z_7.d2 + x.w7.d1 * z_pow1_11.z_7.d1 + x.w7.d2 * z_pow1_11.z_7.d0, - d3=x.w7.d1 * z_pow1_11.z_7.d2 + x.w7.d2 * z_pow1_11.z_7.d1, - d4=x.w7.d2 * z_pow1_11.z_7.d2, - ); - - tempvar x_of_z_w8 = UnreducedBigInt5( - d0=x.w8.d0 * z_pow1_11.z_8.d0, - d1=x.w8.d0 * z_pow1_11.z_8.d1 + x.w8.d1 * z_pow1_11.z_8.d0, - d2=x.w8.d0 * z_pow1_11.z_8.d2 + x.w8.d1 * z_pow1_11.z_8.d1 + x.w8.d2 * z_pow1_11.z_8.d0, - d3=x.w8.d1 * z_pow1_11.z_8.d2 + x.w8.d2 * z_pow1_11.z_8.d1, - d4=x.w8.d2 * z_pow1_11.z_8.d2, - ); - - tempvar x_of_z_w9 = UnreducedBigInt5( - d0=x.w9.d0 * z_pow1_11.z_9.d0, - d1=x.w9.d0 * z_pow1_11.z_9.d1 + x.w9.d1 * z_pow1_11.z_9.d0, - d2=x.w9.d0 * z_pow1_11.z_9.d2 + x.w9.d1 * z_pow1_11.z_9.d1 + x.w9.d2 * z_pow1_11.z_9.d0, - d3=x.w9.d1 * z_pow1_11.z_9.d2 + x.w9.d2 * z_pow1_11.z_9.d1, - d4=x.w9.d2 * z_pow1_11.z_9.d2, - ); - - tempvar x_of_z_w10 = UnreducedBigInt5( - d0=x.w10.d0 * z_pow1_11.z_10.d0, - d1=x.w10.d0 * z_pow1_11.z_10.d1 + x.w10.d1 * z_pow1_11.z_10.d0, - d2=x.w10.d0 * z_pow1_11.z_10.d2 + x.w10.d1 * z_pow1_11.z_10.d1 + x.w10.d2 * - z_pow1_11.z_10.d0, - d3=x.w10.d1 * z_pow1_11.z_10.d2 + x.w10.d2 * z_pow1_11.z_10.d1, - d4=x.w10.d2 * z_pow1_11.z_10.d2, - ); - - tempvar x_of_z_w11 = UnreducedBigInt5( - d0=x.w11.d0 * z_pow1_11.z_11.d0, - d1=x.w11.d0 * z_pow1_11.z_11.d1 + x.w11.d1 * z_pow1_11.z_11.d0, - d2=x.w11.d0 * z_pow1_11.z_11.d2 + x.w11.d1 * z_pow1_11.z_11.d1 + x.w11.d2 * - z_pow1_11.z_11.d0, - d3=x.w11.d1 * z_pow1_11.z_11.d2 + x.w11.d2 * z_pow1_11.z_11.d1, - d4=x.w11.d2 * z_pow1_11.z_11.d2, - ); - - let x_of_z = reduce_5( - UnreducedBigInt5( - d0=x.w0.d0 + x_of_z_w1.d0 + x_of_z_w2.d0 + x_of_z_w3.d0 + x_of_z_w4.d0 + - x_of_z_w5.d0 + x_of_z_w6.d0 + x_of_z_w7.d0 + x_of_z_w8.d0 + x_of_z_w9.d0 + - x_of_z_w10.d0 + x_of_z_w11.d0, - d1=x.w0.d1 + x_of_z_w1.d1 + x_of_z_w2.d1 + x_of_z_w3.d1 + x_of_z_w4.d1 + - x_of_z_w5.d1 + x_of_z_w6.d1 + x_of_z_w7.d1 + x_of_z_w8.d1 + x_of_z_w9.d1 + - x_of_z_w10.d1 + x_of_z_w11.d1, - d2=x.w0.d2 + x_of_z_w1.d2 + x_of_z_w2.d2 + x_of_z_w3.d2 + x_of_z_w4.d2 + - x_of_z_w5.d2 + x_of_z_w6.d2 + x_of_z_w7.d2 
+ x_of_z_w8.d2 + x_of_z_w9.d2 + - x_of_z_w10.d2 + x_of_z_w11.d2, - d3=x_of_z_w1.d3 + x_of_z_w2.d3 + x_of_z_w3.d3 + x_of_z_w4.d3 + x_of_z_w5.d3 + - x_of_z_w6.d3 + x_of_z_w7.d3 + x_of_z_w8.d3 + x_of_z_w9.d3 + x_of_z_w10.d3 + - x_of_z_w11.d3, - d4=x_of_z_w1.d4 + x_of_z_w2.d4 + x_of_z_w3.d4 + x_of_z_w4.d4 + x_of_z_w5.d4 + - x_of_z_w6.d4 + x_of_z_w7.d4 + x_of_z_w8.d4 + x_of_z_w9.d4 + x_of_z_w10.d4 + - x_of_z_w11.d4, - ), - ); - - tempvar y_of_z_w1 = UnreducedBigInt5( - d0=y.w1.d0 * z_pow1_11.z_1.d0, - d1=y.w1.d0 * z_pow1_11.z_1.d1 + y.w1.d1 * z_pow1_11.z_1.d0, - d2=y.w1.d0 * z_pow1_11.z_1.d2 + y.w1.d1 * z_pow1_11.z_1.d1 + y.w1.d2 * z_pow1_11.z_1.d0, - d3=y.w1.d1 * z_pow1_11.z_1.d2 + y.w1.d2 * z_pow1_11.z_1.d1, - d4=y.w1.d2 * z_pow1_11.z_1.d2, - ); - tempvar y_of_z_w2 = UnreducedBigInt5( - d0=y.w2.d0 * z_pow1_11.z_2.d0, - d1=y.w2.d0 * z_pow1_11.z_2.d1 + y.w2.d1 * z_pow1_11.z_2.d0, - d2=y.w2.d0 * z_pow1_11.z_2.d2 + y.w2.d1 * z_pow1_11.z_2.d1 + y.w2.d2 * z_pow1_11.z_2.d0, - d3=y.w2.d1 * z_pow1_11.z_2.d2 + y.w2.d2 * z_pow1_11.z_2.d1, - d4=y.w2.d2 * z_pow1_11.z_2.d2, - ); - tempvar y_of_z_w3 = UnreducedBigInt5( - d0=y.w3.d0 * z_pow1_11.z_3.d0, - d1=y.w3.d0 * z_pow1_11.z_3.d1 + y.w3.d1 * z_pow1_11.z_3.d0, - d2=y.w3.d0 * z_pow1_11.z_3.d2 + y.w3.d1 * z_pow1_11.z_3.d1 + y.w3.d2 * z_pow1_11.z_3.d0, - d3=y.w3.d1 * z_pow1_11.z_3.d2 + y.w3.d2 * z_pow1_11.z_3.d1, - d4=y.w3.d2 * z_pow1_11.z_3.d2, - ); - tempvar y_of_z_w4 = UnreducedBigInt5( - d0=y.w4.d0 * z_pow1_11.z_4.d0, - d1=y.w4.d0 * z_pow1_11.z_4.d1 + y.w4.d1 * z_pow1_11.z_4.d0, - d2=y.w4.d0 * z_pow1_11.z_4.d2 + y.w4.d1 * z_pow1_11.z_4.d1 + y.w4.d2 * z_pow1_11.z_4.d0, - d3=y.w4.d1 * z_pow1_11.z_4.d2 + y.w4.d2 * z_pow1_11.z_4.d1, - d4=y.w4.d2 * z_pow1_11.z_4.d2, - ); - - tempvar y_of_z_w6 = UnreducedBigInt5( - d0=y.w6.d0 * z_pow1_11.z_6.d0, - d1=y.w6.d0 * z_pow1_11.z_6.d1 + y.w6.d1 * z_pow1_11.z_6.d0, - d2=y.w6.d0 * z_pow1_11.z_6.d2 + y.w6.d1 * z_pow1_11.z_6.d1 + y.w6.d2 * z_pow1_11.z_6.d0, - d3=y.w6.d1 * z_pow1_11.z_6.d2 + y.w6.d2 * z_pow1_11.z_6.d1, - d4=y.w6.d2 * z_pow1_11.z_6.d2, - ); - - tempvar y_of_z_w7 = UnreducedBigInt5( - d0=y.w7.d0 * z_pow1_11.z_7.d0, - d1=y.w7.d0 * z_pow1_11.z_7.d1 + y.w7.d1 * z_pow1_11.z_7.d0, - d2=y.w7.d0 * z_pow1_11.z_7.d2 + y.w7.d1 * z_pow1_11.z_7.d1 + y.w7.d2 * z_pow1_11.z_7.d0, - d3=y.w7.d1 * z_pow1_11.z_7.d2 + y.w7.d2 * z_pow1_11.z_7.d1, - d4=y.w7.d2 * z_pow1_11.z_7.d2, - ); - tempvar y_of_z_w8 = UnreducedBigInt5( - d0=y.w8.d0 * z_pow1_11.z_8.d0, - d1=y.w8.d0 * z_pow1_11.z_8.d1 + y.w8.d1 * z_pow1_11.z_8.d0, - d2=y.w8.d0 * z_pow1_11.z_8.d2 + y.w8.d1 * z_pow1_11.z_8.d1 + y.w8.d2 * z_pow1_11.z_8.d0, - d3=y.w8.d1 * z_pow1_11.z_8.d2 + y.w8.d2 * z_pow1_11.z_8.d1, - d4=y.w8.d2 * z_pow1_11.z_8.d2, - ); - tempvar y_of_z_w9 = UnreducedBigInt5( - d0=y.w9.d0 * z_pow1_11.z_9.d0, - d1=y.w9.d0 * z_pow1_11.z_9.d1 + y.w9.d1 * z_pow1_11.z_9.d0, - d2=y.w9.d0 * z_pow1_11.z_9.d2 + y.w9.d1 * z_pow1_11.z_9.d1 + y.w9.d2 * z_pow1_11.z_9.d0, - d3=y.w9.d1 * z_pow1_11.z_9.d2 + y.w9.d2 * z_pow1_11.z_9.d1, - d4=y.w9.d2 * z_pow1_11.z_9.d2, - ); - tempvar y_of_z_w10 = UnreducedBigInt5( - d0=y.w10.d0 * z_pow1_11.z_10.d0, - d1=y.w10.d0 * z_pow1_11.z_10.d1 + y.w10.d1 * z_pow1_11.z_10.d0, - d2=y.w10.d0 * z_pow1_11.z_10.d2 + y.w10.d1 * z_pow1_11.z_10.d1 + y.w10.d2 * - z_pow1_11.z_10.d0, - d3=y.w10.d1 * z_pow1_11.z_10.d2 + y.w10.d2 * z_pow1_11.z_10.d1, - d4=y.w10.d2 * z_pow1_11.z_10.d2, - ); - tempvar y_of_z_w11 = UnreducedBigInt5( - d0=y.w11.d0 * z_pow1_11.z_11.d0, - d1=y.w11.d0 * z_pow1_11.z_11.d1 + y.w11.d1 * z_pow1_11.z_11.d0, - d2=y.w11.d0 * 
z_pow1_11.z_11.d2 + y.w11.d1 * z_pow1_11.z_11.d1 + y.w11.d2 * - z_pow1_11.z_11.d0, - d3=y.w11.d1 * z_pow1_11.z_11.d2 + y.w11.d2 * z_pow1_11.z_11.d1, - d4=y.w11.d2 * z_pow1_11.z_11.d2, - ); - let y_of_z = reduce_5( - UnreducedBigInt5( - d0=y.w0.d0 + y_of_z_w1.d0 + y_of_z_w2.d0 + y_of_z_w3.d0 + y_of_z_w4.d0 + - y_of_z_w6.d0 + y_of_z_w7.d0 + y_of_z_w8.d0 + y_of_z_w9.d0 + y_of_z_w10.d0 + - y_of_z_w11.d0, - d1=y.w0.d1 + y_of_z_w1.d1 + y_of_z_w2.d1 + y_of_z_w3.d1 + y_of_z_w4.d1 + - y_of_z_w6.d1 + y_of_z_w7.d1 + y_of_z_w8.d1 + y_of_z_w9.d1 + y_of_z_w10.d1 + - y_of_z_w11.d1, - d2=y.w0.d2 + y_of_z_w1.d2 + y_of_z_w2.d2 + y_of_z_w3.d2 + y_of_z_w4.d2 + - y_of_z_w6.d2 + y_of_z_w7.d2 + y_of_z_w8.d2 + y_of_z_w9.d2 + y_of_z_w10.d2 + - y_of_z_w11.d2, - d3=y_of_z_w1.d3 + y_of_z_w2.d3 + y_of_z_w3.d3 + y_of_z_w4.d3 + y_of_z_w6.d3 + - y_of_z_w7.d3 + y_of_z_w8.d3 + y_of_z_w9.d3 + y_of_z_w10.d3 + y_of_z_w11.d3, - d4=y_of_z_w1.d4 + y_of_z_w2.d4 + y_of_z_w3.d4 + y_of_z_w4.d4 + y_of_z_w6.d4 + - y_of_z_w7.d4 + y_of_z_w8.d4 + y_of_z_w9.d4 + y_of_z_w10.d4 + y_of_z_w11.d4, - ), - ); - - let xy_acc = reduce_5( - UnreducedBigInt5( - d0=x_of_z.d0 * y_of_z.d0, - d1=x_of_z.d0 * y_of_z.d1 + x_of_z.d1 * y_of_z.d0, - d2=x_of_z.d0 * y_of_z.d2 + x_of_z.d1 * y_of_z.d1 + x_of_z.d2 * y_of_z.d0, - d3=x_of_z.d1 * y_of_z.d2 + x_of_z.d2 * y_of_z.d1, - d4=x_of_z.d2 * y_of_z.d2, - ), - ); - - let poseidon_ptr = poseidon_ptr + 64 * PoseidonBuiltin.SIZE; - let continuable_hash = [poseidon_ptr - PoseidonBuiltin.SIZE].output.s0; - let random_linear_combination_coeff = [poseidon_ptr - PoseidonBuiltin.SIZE].output.s1; - assert bitwise_ptr.x = random_linear_combination_coeff; - assert bitwise_ptr.y = BASE_MIN_1; - tempvar c_i = bitwise_ptr.x_and_y; - let bitwise_ptr = bitwise_ptr + BitwiseBuiltin.SIZE; - - local poly_acc_12_f: PolyAcc12 = PolyAcc12( - xy=UnreducedBigInt3( - d0=poly_acc_12.xy.d0 + c_i * xy_acc.d0, - d1=poly_acc_12.xy.d1 + c_i * xy_acc.d1, - d2=poly_acc_12.xy.d2 + c_i * xy_acc.d2, - ), - q=E11DU( - Uint256( - c_i * q_w.w0.low + poly_acc_12.q.w0.low, - c_i * q_w.w0.high + poly_acc_12.q.w0.high, - ), - Uint256( - c_i * q_w.w1.low + poly_acc_12.q.w1.low, - c_i * q_w.w1.high + poly_acc_12.q.w1.high, - ), - Uint256( - c_i * q_w.w2.low + poly_acc_12.q.w2.low, - c_i * q_w.w2.high + poly_acc_12.q.w2.high, - ), - Uint256( - c_i * q_w.w3.low + poly_acc_12.q.w3.low, - c_i * q_w.w3.high + poly_acc_12.q.w3.high, - ), - Uint256( - c_i * q_w.w4.low + poly_acc_12.q.w4.low, - c_i * q_w.w4.high + poly_acc_12.q.w4.high, - ), - Uint256( - c_i * q_w.w5.low + poly_acc_12.q.w5.low, - c_i * q_w.w5.high + poly_acc_12.q.w5.high, - ), - Uint256( - c_i * q_w.w6.low + poly_acc_12.q.w6.low, - c_i * q_w.w6.high + poly_acc_12.q.w6.high, - ), - Uint256( - c_i * q_w.w7.low + poly_acc_12.q.w7.low, - c_i * q_w.w7.high + poly_acc_12.q.w7.high, - ), - Uint256( - c_i * q_w.w8.low + poly_acc_12.q.w8.low, - c_i * q_w.w8.high + poly_acc_12.q.w8.high, - ), - Uint256( - c_i * q_w.w9.low + poly_acc_12.q.w9.low, - c_i * q_w.w9.high + poly_acc_12.q.w9.high, - ), - Uint256( - c_i * q_w.w10.low + poly_acc_12.q.w10.low, - c_i * q_w.w10.high + poly_acc_12.q.w10.high, - ), - ), - r=E12DU( - UnreducedBigInt3( - d0=c_i * r_w.w0.d0 + poly_acc_12.r.w0.d0, - d1=c_i * r_w.w0.d1 + poly_acc_12.r.w0.d1, - d2=c_i * r_w.w0.d2 + poly_acc_12.r.w0.d2, - ), - UnreducedBigInt3( - d0=c_i * r_w.w1.d0 + poly_acc_12.r.w1.d0, - d1=c_i * r_w.w1.d1 + poly_acc_12.r.w1.d1, - d2=c_i * r_w.w1.d2 + poly_acc_12.r.w1.d2, - ), - UnreducedBigInt3( - d0=c_i * r_w.w2.d0 + poly_acc_12.r.w2.d0, - d1=c_i * r_w.w2.d1 
+ poly_acc_12.r.w2.d1, - d2=c_i * r_w.w2.d2 + poly_acc_12.r.w2.d2, - ), - UnreducedBigInt3( - d0=c_i * r_w.w3.d0 + poly_acc_12.r.w3.d0, - d1=c_i * r_w.w3.d1 + poly_acc_12.r.w3.d1, - d2=c_i * r_w.w3.d2 + poly_acc_12.r.w3.d2, - ), - UnreducedBigInt3( - d0=c_i * r_w.w4.d0 + poly_acc_12.r.w4.d0, - d1=c_i * r_w.w4.d1 + poly_acc_12.r.w4.d1, - d2=c_i * r_w.w4.d2 + poly_acc_12.r.w4.d2, - ), - UnreducedBigInt3( - d0=c_i * r_w.w5.d0 + poly_acc_12.r.w5.d0, - d1=c_i * r_w.w5.d1 + poly_acc_12.r.w5.d1, - d2=c_i * r_w.w5.d2 + poly_acc_12.r.w5.d2, - ), - UnreducedBigInt3( - d0=c_i * r_w.w6.d0 + poly_acc_12.r.w6.d0, - d1=c_i * r_w.w6.d1 + poly_acc_12.r.w6.d1, - d2=c_i * r_w.w6.d2 + poly_acc_12.r.w6.d2, - ), - UnreducedBigInt3( - d0=c_i * r_w.w7.d0 + poly_acc_12.r.w7.d0, - d1=c_i * r_w.w7.d1 + poly_acc_12.r.w7.d1, - d2=c_i * r_w.w7.d2 + poly_acc_12.r.w7.d2, - ), - UnreducedBigInt3( - d0=c_i * r_w.w8.d0 + poly_acc_12.r.w8.d0, - d1=c_i * r_w.w8.d1 + poly_acc_12.r.w8.d1, - d2=c_i * r_w.w8.d2 + poly_acc_12.r.w8.d2, - ), - UnreducedBigInt3( - d0=c_i * r_w.w9.d0 + poly_acc_12.r.w9.d0, - d1=c_i * r_w.w9.d1 + poly_acc_12.r.w9.d1, - d2=c_i * r_w.w9.d2 + poly_acc_12.r.w9.d2, - ), - UnreducedBigInt3( - d0=c_i * r_w.w10.d0 + poly_acc_12.r.w10.d0, - d1=c_i * r_w.w10.d1 + poly_acc_12.r.w10.d1, - d2=c_i * r_w.w10.d2 + poly_acc_12.r.w10.d2, - ), - UnreducedBigInt3( - d0=c_i * r_w.w11.d0 + poly_acc_12.r.w11.d0, - d1=c_i * r_w.w11.d1 + poly_acc_12.r.w11.d1, - d2=c_i * r_w.w11.d2 + poly_acc_12.r.w11.d2, - ), - ), - ); - let poly_acc_12 = &poly_acc_12_f; - - return &r_w; - } -} - -namespace e12 { - func mul_trick_pure{range_check_ptr, poseidon_ptr: PoseidonBuiltin*}( - x_ptr: E12D*, y_ptr: E12D* - ) -> E12D* { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - local x: E12D = [x_ptr]; - local y: E12D = [y_ptr]; - local r_w: E12D; - local q_w: E11DU3; - %{ - from tools.py.polynomial import Polynomial - from tools.py.field import BaseFieldElement, BaseField - from src.bn254.curve import IRREDUCIBLE_POLY_12 - from starkware.cairo.common.cairo_secp.secp_utils import split - from starkware.cairo.common.math_utils import as_int - from tools.make.utils import split_128 - - p=0x30644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd47 - field = BaseField(p) - x=12*[0] - y=12*[0] - x_refs = [ids.x.w0, ids.x.w1, ids.x.w2, ids.x.w3, ids.x.w4, ids.x.w5, ids.x.w6, ids.x.w7, ids.x.w8, ids.x.w9, ids.x.w10, ids.x.w11] - y_refs = [ids.y.w0, ids.y.w1, ids.y.w2, ids.y.w3, ids.y.w4, ids.y.w5, ids.y.w6, ids.y.w7, ids.y.w8, ids.y.w9, ids.y.w10, ids.y.w11] - for i in range(ids.N_LIMBS): - for k in range(12): - x[k]+=as_int(getattr(x_refs[k], 'd'+str(i)), PRIME) * ids.BASE**i - for k in range(12): - y[k]+=as_int(getattr(y_refs[k], 'd'+str(i)), PRIME) * ids.BASE**i - x_poly=Polynomial([BaseFieldElement(x[i], field) for i in range(12)]) - y_poly=Polynomial([BaseFieldElement(y[i], field) for i in range(12)]) - z_poly=x_poly*y_poly - z_polyr=z_poly % IRREDUCIBLE_POLY_12 - z_polyq=z_poly // IRREDUCIBLE_POLY_12 - z_polyr_coeffs = z_polyr.get_coeffs() - z_polyq_coeffs = z_polyq.get_coeffs() - assert len(z_polyq_coeffs)<=11, f"len z_polyq_coeffs: {len(z_polyq_coeffs)}, degree: {z_polyq.degree()}" - assert len(z_polyr_coeffs)<=12, f"len z_polyr_coeffs: {z_polyr_coeffs}, degree: {z_polyr.degree()}" - #print(f"Z_PolyR034034: {z_polyr_coeffs}") - # extend z_polyq with 0 to make it len 9: - z_polyq_coeffs = z_polyq_coeffs + (11-len(z_polyq_coeffs))*[0] - # extend z_polyr with 0 to make it len 12: - z_polyr_coeffs = z_polyr_coeffs + 
(12-len(z_polyr_coeffs))*[0] - #expected = flatten(mul_e12_gnark(pack_e12(x_gnark), pack_e12(y_gnark))) - #assert expected==w_to_gnark(z_polyr_coeffs) - #print(f"Z_PolyR: {z_polyr_coeffs}") - #print(f"Z_PolyR_to_gnark: {w_to_gnark(z_polyr_coeffs)}") - for i in range(12): - val = split(z_polyr_coeffs[i]%p) - for k in range(3): - rsetattr(ids.r_w, f'w{i}.d{k}', val[k]) - for i in range(11): - val = split(z_polyq_coeffs[i]%p) - for k in range(3): - rsetattr(ids.q_w, f'w{i}.d{k}', val[k]) - %} - assert [range_check_ptr + 0] = r_w.w0.d0; - assert [range_check_ptr + 1] = r_w.w0.d1; - assert [range_check_ptr + 2] = r_w.w0.d2; - assert [range_check_ptr + 3] = r_w.w1.d0; - assert [range_check_ptr + 4] = r_w.w1.d1; - assert [range_check_ptr + 5] = r_w.w1.d2; - assert [range_check_ptr + 6] = r_w.w2.d0; - assert [range_check_ptr + 7] = r_w.w2.d1; - assert [range_check_ptr + 8] = r_w.w2.d2; - assert [range_check_ptr + 9] = r_w.w3.d0; - assert [range_check_ptr + 10] = r_w.w3.d1; - assert [range_check_ptr + 11] = r_w.w3.d2; - assert [range_check_ptr + 12] = r_w.w4.d0; - assert [range_check_ptr + 13] = r_w.w4.d1; - assert [range_check_ptr + 14] = r_w.w4.d2; - assert [range_check_ptr + 15] = r_w.w5.d0; - assert [range_check_ptr + 16] = r_w.w5.d1; - assert [range_check_ptr + 17] = r_w.w5.d2; - assert [range_check_ptr + 18] = r_w.w6.d0; - assert [range_check_ptr + 19] = r_w.w6.d1; - assert [range_check_ptr + 20] = r_w.w6.d2; - assert [range_check_ptr + 21] = r_w.w7.d0; - assert [range_check_ptr + 22] = r_w.w7.d1; - assert [range_check_ptr + 23] = r_w.w7.d2; - assert [range_check_ptr + 24] = r_w.w8.d0; - assert [range_check_ptr + 25] = r_w.w8.d1; - assert [range_check_ptr + 26] = r_w.w8.d2; - assert [range_check_ptr + 27] = r_w.w9.d0; - assert [range_check_ptr + 28] = r_w.w9.d1; - assert [range_check_ptr + 29] = r_w.w9.d2; - assert [range_check_ptr + 30] = r_w.w10.d0; - assert [range_check_ptr + 31] = r_w.w10.d1; - assert [range_check_ptr + 32] = r_w.w10.d2; - assert [range_check_ptr + 33] = r_w.w11.d0; - assert [range_check_ptr + 34] = r_w.w11.d1; - assert [range_check_ptr + 35] = r_w.w11.d2; - assert [range_check_ptr + 36] = q_w.w0.d0; - assert [range_check_ptr + 37] = q_w.w0.d1; - assert [range_check_ptr + 38] = q_w.w0.d2; - assert [range_check_ptr + 39] = q_w.w1.d0; - assert [range_check_ptr + 40] = q_w.w1.d1; - assert [range_check_ptr + 41] = q_w.w1.d2; - assert [range_check_ptr + 42] = q_w.w2.d0; - assert [range_check_ptr + 43] = q_w.w2.d1; - assert [range_check_ptr + 44] = q_w.w2.d2; - assert [range_check_ptr + 45] = q_w.w3.d0; - assert [range_check_ptr + 46] = q_w.w3.d1; - assert [range_check_ptr + 47] = q_w.w3.d2; - assert [range_check_ptr + 48] = q_w.w4.d0; - assert [range_check_ptr + 49] = q_w.w4.d1; - assert [range_check_ptr + 50] = q_w.w4.d2; - assert [range_check_ptr + 51] = q_w.w5.d0; - assert [range_check_ptr + 52] = q_w.w5.d1; - assert [range_check_ptr + 53] = q_w.w5.d2; - assert [range_check_ptr + 54] = q_w.w6.d0; - assert [range_check_ptr + 55] = q_w.w6.d1; - assert [range_check_ptr + 56] = q_w.w6.d2; - assert [range_check_ptr + 57] = q_w.w7.d0; - assert [range_check_ptr + 58] = q_w.w7.d1; - assert [range_check_ptr + 59] = q_w.w7.d2; - assert [range_check_ptr + 60] = q_w.w8.d0; - assert [range_check_ptr + 61] = q_w.w8.d1; - assert [range_check_ptr + 62] = q_w.w8.d2; - assert [range_check_ptr + 63] = q_w.w9.d0; - assert [range_check_ptr + 64] = q_w.w9.d1; - assert [range_check_ptr + 65] = q_w.w9.d2; - assert [range_check_ptr + 66] = q_w.w10.d0; - assert [range_check_ptr + 67] = q_w.w10.d1; - 
assert [range_check_ptr + 68] = q_w.w10.d2; - assert [range_check_ptr + 69] = (12 + 11) * 3 * BASE_MIN_1 - ( - r_w.w0.d0 + - r_w.w0.d1 + - r_w.w0.d2 + - r_w.w1.d0 + - r_w.w1.d1 + - r_w.w1.d2 + - r_w.w2.d0 + - r_w.w2.d1 + - r_w.w2.d2 + - r_w.w3.d0 + - r_w.w3.d1 + - r_w.w3.d2 + - r_w.w4.d0 + - r_w.w4.d1 + - r_w.w4.d2 + - r_w.w5.d0 + - r_w.w5.d1 + - r_w.w5.d2 + - r_w.w6.d0 + - r_w.w6.d1 + - r_w.w6.d2 + - r_w.w7.d0 + - r_w.w7.d1 + - r_w.w7.d2 + - r_w.w8.d0 + - r_w.w8.d1 + - r_w.w8.d2 + - r_w.w9.d0 + - r_w.w9.d1 + - r_w.w9.d2 + - r_w.w10.d0 + - r_w.w10.d1 + - r_w.w10.d2 + - r_w.w11.d0 + - r_w.w11.d1 + - r_w.w11.d2 + - q_w.w0.d0 + - q_w.w0.d1 + - q_w.w0.d2 + - q_w.w1.d0 + - q_w.w1.d1 + - q_w.w1.d2 + - q_w.w2.d0 + - q_w.w2.d1 + - q_w.w2.d2 + - q_w.w3.d0 + - q_w.w3.d1 + - q_w.w3.d2 + - q_w.w4.d0 + - q_w.w4.d1 + - q_w.w4.d2 + - q_w.w5.d0 + - q_w.w5.d1 + - q_w.w5.d2 + - q_w.w6.d0 + - q_w.w6.d1 + - q_w.w6.d2 + - q_w.w7.d0 + - q_w.w7.d1 + - q_w.w7.d2 + - q_w.w8.d0 + - q_w.w8.d1 + - q_w.w8.d2 + - q_w.w9.d0 + - q_w.w9.d1 + - q_w.w9.d2 + - q_w.w10.d0 + - q_w.w10.d1 + - q_w.w10.d2 - ); - tempvar range_check_ptr = range_check_ptr + 70; - - tempvar two = 2; - tempvar ptr = cast(poseidon_ptr, felt); - assert poseidon_ptr.input = PoseidonBuiltinState(s0=x.w0.d0 * x.w0.d1, s1=0, s2=two); - assert poseidon_ptr[1].input = PoseidonBuiltinState( - s0=x.w0.d2 * x.w1.d0, s1=[ptr + 3], s2=two - ); - assert poseidon_ptr[2].input = PoseidonBuiltinState( - s0=x.w1.d1 * x.w1.d2, s1=poseidon_ptr[1].output.s0, s2=two - ); - assert poseidon_ptr[3].input = PoseidonBuiltinState( - s0=x.w2.d0 * x.w2.d1, s1=poseidon_ptr[2].output.s0, s2=two - ); - assert poseidon_ptr[4].input = PoseidonBuiltinState( - s0=x.w2.d2 * x.w3.d0, s1=poseidon_ptr[3].output.s0, s2=two - ); - assert poseidon_ptr[5].input = PoseidonBuiltinState( - s0=x.w3.d1 * x.w3.d2, s1=poseidon_ptr[4].output.s0, s2=two - ); - assert poseidon_ptr[6].input = PoseidonBuiltinState( - s0=x.w4.d0 * x.w4.d1, s1=poseidon_ptr[5].output.s0, s2=two - ); - assert poseidon_ptr[7].input = PoseidonBuiltinState( - s0=x.w4.d2 * x.w5.d0, s1=poseidon_ptr[6].output.s0, s2=two - ); - assert poseidon_ptr[8].input = PoseidonBuiltinState( - s0=x.w5.d1 * x.w5.d2, s1=poseidon_ptr[7].output.s0, s2=two - ); - assert poseidon_ptr[9].input = PoseidonBuiltinState( - s0=x.w6.d0 * x.w6.d1, s1=poseidon_ptr[8].output.s0, s2=two - ); - assert poseidon_ptr[10].input = PoseidonBuiltinState( - s0=x.w6.d2 * x.w7.d0, s1=poseidon_ptr[9].output.s0, s2=two - ); - assert poseidon_ptr[11].input = PoseidonBuiltinState( - s0=x.w7.d1 * x.w7.d2, s1=poseidon_ptr[10].output.s0, s2=two - ); - assert poseidon_ptr[12].input = PoseidonBuiltinState( - s0=x.w8.d0 * x.w8.d1, s1=poseidon_ptr[11].output.s0, s2=two - ); - assert poseidon_ptr[13].input = PoseidonBuiltinState( - s0=x.w8.d2 * x.w9.d0, s1=poseidon_ptr[12].output.s0, s2=two - ); - assert poseidon_ptr[14].input = PoseidonBuiltinState( - s0=x.w9.d1 * x.w9.d2, s1=poseidon_ptr[13].output.s0, s2=two - ); - assert poseidon_ptr[15].input = PoseidonBuiltinState( - s0=x.w10.d0 * x.w10.d1, s1=poseidon_ptr[14].output.s0, s2=two - ); - assert poseidon_ptr[16].input = PoseidonBuiltinState( - s0=x.w10.d2 * x.w11.d0, s1=poseidon_ptr[15].output.s0, s2=two - ); - assert poseidon_ptr[17].input = PoseidonBuiltinState( - s0=x.w11.d1 * x.w11.d2, s1=poseidon_ptr[16].output.s0, s2=two - ); - assert poseidon_ptr[18].input = PoseidonBuiltinState( - s0=y.w0.d0 * y.w0.d1, s1=poseidon_ptr[17].output.s0, s2=two - ); - assert poseidon_ptr[19].input = PoseidonBuiltinState( - s0=y.w0.d2 * y.w1.d0, 
s1=poseidon_ptr[18].output.s0, s2=two - ); - assert poseidon_ptr[20].input = PoseidonBuiltinState( - s0=y.w1.d1 * y.w1.d2, s1=poseidon_ptr[19].output.s0, s2=two - ); - assert poseidon_ptr[21].input = PoseidonBuiltinState( - s0=y.w2.d0 * y.w2.d1, s1=poseidon_ptr[20].output.s0, s2=two - ); - assert poseidon_ptr[22].input = PoseidonBuiltinState( - s0=y.w2.d2 * y.w3.d0, s1=poseidon_ptr[21].output.s0, s2=two - ); - assert poseidon_ptr[23].input = PoseidonBuiltinState( - s0=y.w3.d1 * y.w3.d2, s1=poseidon_ptr[22].output.s0, s2=two - ); - assert poseidon_ptr[24].input = PoseidonBuiltinState( - s0=y.w4.d0 * y.w4.d1, s1=poseidon_ptr[23].output.s0, s2=two - ); - assert poseidon_ptr[25].input = PoseidonBuiltinState( - s0=y.w4.d2 * y.w5.d0, s1=poseidon_ptr[24].output.s0, s2=two - ); - assert poseidon_ptr[26].input = PoseidonBuiltinState( - s0=y.w5.d1 * y.w5.d2, s1=poseidon_ptr[25].output.s0, s2=two - ); - assert poseidon_ptr[27].input = PoseidonBuiltinState( - s0=y.w6.d0 * y.w6.d1, s1=poseidon_ptr[26].output.s0, s2=two - ); - assert poseidon_ptr[28].input = PoseidonBuiltinState( - s0=y.w6.d2 * y.w7.d0, s1=poseidon_ptr[27].output.s0, s2=two - ); - assert poseidon_ptr[29].input = PoseidonBuiltinState( - s0=y.w7.d1 * y.w7.d2, s1=poseidon_ptr[28].output.s0, s2=two - ); - assert poseidon_ptr[30].input = PoseidonBuiltinState( - s0=y.w8.d0 * y.w8.d1, s1=poseidon_ptr[29].output.s0, s2=two - ); - assert poseidon_ptr[31].input = PoseidonBuiltinState( - s0=y.w8.d2 * y.w9.d0, s1=poseidon_ptr[30].output.s0, s2=two - ); - assert poseidon_ptr[32].input = PoseidonBuiltinState( - s0=y.w9.d1 * y.w9.d2, s1=poseidon_ptr[31].output.s0, s2=two - ); - assert poseidon_ptr[33].input = PoseidonBuiltinState( - s0=y.w10.d0 * y.w10.d1, s1=poseidon_ptr[32].output.s0, s2=two - ); - assert poseidon_ptr[34].input = PoseidonBuiltinState( - s0=y.w10.d2 * y.w11.d0, s1=poseidon_ptr[33].output.s0, s2=two - ); - assert poseidon_ptr[35].input = PoseidonBuiltinState( - s0=y.w11.d1 * y.w11.d2, s1=poseidon_ptr[34].output.s0, s2=two - ); - assert poseidon_ptr[36].input = PoseidonBuiltinState( - s0=q_w.w0.d0 * q_w.w0.d1, s1=poseidon_ptr[35].output.s0, s2=two - ); - assert poseidon_ptr[37].input = PoseidonBuiltinState( - s0=q_w.w0.d2 * q_w.w1.d0, s1=poseidon_ptr[36].output.s0, s2=two - ); - assert poseidon_ptr[38].input = PoseidonBuiltinState( - s0=q_w.w1.d1 * q_w.w1.d2, s1=poseidon_ptr[37].output.s0, s2=two - ); - assert poseidon_ptr[39].input = PoseidonBuiltinState( - s0=q_w.w2.d0 * q_w.w2.d1, s1=poseidon_ptr[38].output.s0, s2=two - ); - assert poseidon_ptr[40].input = PoseidonBuiltinState( - s0=q_w.w2.d2 * q_w.w3.d0, s1=poseidon_ptr[39].output.s0, s2=two - ); - assert poseidon_ptr[41].input = PoseidonBuiltinState( - s0=q_w.w3.d1 * q_w.w3.d2, s1=poseidon_ptr[40].output.s0, s2=two - ); - assert poseidon_ptr[42].input = PoseidonBuiltinState( - s0=q_w.w4.d0 * q_w.w4.d1, s1=poseidon_ptr[41].output.s0, s2=two - ); - assert poseidon_ptr[43].input = PoseidonBuiltinState( - s0=q_w.w4.d2 * q_w.w5.d0, s1=poseidon_ptr[42].output.s0, s2=two - ); - assert poseidon_ptr[44].input = PoseidonBuiltinState( - s0=q_w.w5.d1 * q_w.w5.d2, s1=poseidon_ptr[43].output.s0, s2=two - ); - assert poseidon_ptr[45].input = PoseidonBuiltinState( - s0=q_w.w6.d0 * q_w.w6.d1, s1=poseidon_ptr[44].output.s0, s2=two - ); - assert poseidon_ptr[46].input = PoseidonBuiltinState( - s0=q_w.w6.d2 * q_w.w7.d0, s1=poseidon_ptr[45].output.s0, s2=two - ); - assert poseidon_ptr[47].input = PoseidonBuiltinState( - s0=q_w.w7.d1 * q_w.w7.d2, s1=poseidon_ptr[46].output.s0, s2=two - ); - assert 
poseidon_ptr[48].input = PoseidonBuiltinState( - s0=q_w.w8.d0 * q_w.w8.d1, s1=poseidon_ptr[47].output.s0, s2=two - ); - assert poseidon_ptr[49].input = PoseidonBuiltinState( - s0=q_w.w8.d2 * q_w.w9.d0, s1=poseidon_ptr[48].output.s0, s2=two - ); - assert poseidon_ptr[50].input = PoseidonBuiltinState( - s0=q_w.w9.d1 * q_w.w9.d2, s1=poseidon_ptr[49].output.s0, s2=two - ); - assert poseidon_ptr[51].input = PoseidonBuiltinState( - s0=q_w.w10.d0 * q_w.w10.d1, s1=poseidon_ptr[50].output.s0, s2=two - ); - assert poseidon_ptr[52].input = PoseidonBuiltinState( - s0=q_w.w10.d2 * r_w.w0.d0, s1=poseidon_ptr[51].output.s0, s2=two - ); - assert poseidon_ptr[53].input = PoseidonBuiltinState( - s0=r_w.w0.d1 * r_w.w0.d2, s1=poseidon_ptr[52].output.s0, s2=two - ); - assert poseidon_ptr[54].input = PoseidonBuiltinState( - s0=r_w.w1.d0 * r_w.w1.d1, s1=poseidon_ptr[53].output.s0, s2=two - ); - assert poseidon_ptr[55].input = PoseidonBuiltinState( - s0=r_w.w1.d2 * r_w.w2.d0, s1=poseidon_ptr[54].output.s0, s2=two - ); - assert poseidon_ptr[56].input = PoseidonBuiltinState( - s0=r_w.w2.d1 * r_w.w2.d2, s1=poseidon_ptr[55].output.s0, s2=two - ); - assert poseidon_ptr[57].input = PoseidonBuiltinState( - s0=r_w.w3.d0 * r_w.w3.d1, s1=poseidon_ptr[56].output.s0, s2=two - ); - assert poseidon_ptr[58].input = PoseidonBuiltinState( - s0=r_w.w3.d2 * r_w.w4.d0, s1=poseidon_ptr[57].output.s0, s2=two - ); - assert poseidon_ptr[59].input = PoseidonBuiltinState( - s0=r_w.w4.d1 * r_w.w4.d2, s1=poseidon_ptr[58].output.s0, s2=two - ); - assert poseidon_ptr[60].input = PoseidonBuiltinState( - s0=r_w.w5.d0 * r_w.w5.d1, s1=poseidon_ptr[59].output.s0, s2=two - ); - assert poseidon_ptr[61].input = PoseidonBuiltinState( - s0=r_w.w5.d2 * r_w.w6.d0, s1=poseidon_ptr[60].output.s0, s2=two - ); - assert poseidon_ptr[62].input = PoseidonBuiltinState( - s0=r_w.w6.d1 * r_w.w6.d2, s1=poseidon_ptr[61].output.s0, s2=two - ); - assert poseidon_ptr[63].input = PoseidonBuiltinState( - s0=r_w.w7.d0 * r_w.w7.d1, s1=poseidon_ptr[62].output.s0, s2=two - ); - assert poseidon_ptr[64].input = PoseidonBuiltinState( - s0=r_w.w7.d2 * r_w.w8.d0, s1=poseidon_ptr[63].output.s0, s2=two - ); - assert poseidon_ptr[65].input = PoseidonBuiltinState( - s0=r_w.w8.d1 * r_w.w8.d2, s1=poseidon_ptr[64].output.s0, s2=two - ); - assert poseidon_ptr[66].input = PoseidonBuiltinState( - s0=r_w.w9.d0 * r_w.w9.d1, s1=poseidon_ptr[65].output.s0, s2=two - ); - assert poseidon_ptr[67].input = PoseidonBuiltinState( - s0=r_w.w9.d2 * r_w.w10.d0, s1=poseidon_ptr[66].output.s0, s2=two - ); - assert poseidon_ptr[68].input = PoseidonBuiltinState( - s0=r_w.w10.d1 * r_w.w10.d2, s1=poseidon_ptr[67].output.s0, s2=two - ); - assert poseidon_ptr[69].input = PoseidonBuiltinState( - s0=r_w.w11.d0 * r_w.w11.d1, s1=poseidon_ptr[68].output.s0, s2=two - ); - assert poseidon_ptr[70].input = PoseidonBuiltinState( - s0=r_w.w11.d2, s1=poseidon_ptr[69].output.s0, s2=two - ); - - let poseidon_ptr = poseidon_ptr + 71 * PoseidonBuiltin.SIZE; - let Z = [poseidon_ptr - PoseidonBuiltin.SIZE].output.s0; - let (Z_bigint3) = felt_to_bigint3(Z); - let z_pow1_11_ptr: ZPowers11* = get_powers_of_z11(Z_bigint3); - local z_pow1_11: ZPowers11 = [z_pow1_11_ptr]; - - tempvar x_of_z_w1 = UnreducedBigInt5( - d0=x.w1.d0 * z_pow1_11.z_1.d0, - d1=x.w1.d0 * z_pow1_11.z_1.d1 + x.w1.d1 * z_pow1_11.z_1.d0, - d2=x.w1.d0 * z_pow1_11.z_1.d2 + x.w1.d1 * z_pow1_11.z_1.d1 + x.w1.d2 * z_pow1_11.z_1.d0, - d3=x.w1.d1 * z_pow1_11.z_1.d2 + x.w1.d2 * z_pow1_11.z_1.d1, - d4=x.w1.d2 * z_pow1_11.z_1.d2, - ); - tempvar x_of_z_w2 = UnreducedBigInt5( - 
d0=x.w2.d0 * z_pow1_11.z_2.d0, - d1=x.w2.d0 * z_pow1_11.z_2.d1 + x.w2.d1 * z_pow1_11.z_2.d0, - d2=x.w2.d0 * z_pow1_11.z_2.d2 + x.w2.d1 * z_pow1_11.z_2.d1 + x.w2.d2 * z_pow1_11.z_2.d0, - d3=x.w2.d1 * z_pow1_11.z_2.d2 + x.w2.d2 * z_pow1_11.z_2.d1, - d4=x.w2.d2 * z_pow1_11.z_2.d2, - ); - - tempvar x_of_z_w3 = UnreducedBigInt5( - d0=x.w3.d0 * z_pow1_11.z_3.d0, - d1=x.w3.d0 * z_pow1_11.z_3.d1 + x.w3.d1 * z_pow1_11.z_3.d0, - d2=x.w3.d0 * z_pow1_11.z_3.d2 + x.w3.d1 * z_pow1_11.z_3.d1 + x.w3.d2 * z_pow1_11.z_3.d0, - d3=x.w3.d1 * z_pow1_11.z_3.d2 + x.w3.d2 * z_pow1_11.z_3.d1, - d4=x.w3.d2 * z_pow1_11.z_3.d2, - ); - - tempvar x_of_z_w4 = UnreducedBigInt5( - d0=x.w4.d0 * z_pow1_11.z_4.d0, - d1=x.w4.d0 * z_pow1_11.z_4.d1 + x.w4.d1 * z_pow1_11.z_4.d0, - d2=x.w4.d0 * z_pow1_11.z_4.d2 + x.w4.d1 * z_pow1_11.z_4.d1 + x.w4.d2 * z_pow1_11.z_4.d0, - d3=x.w4.d1 * z_pow1_11.z_4.d2 + x.w4.d2 * z_pow1_11.z_4.d1, - d4=x.w4.d2 * z_pow1_11.z_4.d2, - ); - - tempvar x_of_z_w5 = UnreducedBigInt5( - d0=x.w5.d0 * z_pow1_11.z_5.d0, - d1=x.w5.d0 * z_pow1_11.z_5.d1 + x.w5.d1 * z_pow1_11.z_5.d0, - d2=x.w5.d0 * z_pow1_11.z_5.d2 + x.w5.d1 * z_pow1_11.z_5.d1 + x.w5.d2 * z_pow1_11.z_5.d0, - d3=x.w5.d1 * z_pow1_11.z_5.d2 + x.w5.d2 * z_pow1_11.z_5.d1, - d4=x.w5.d2 * z_pow1_11.z_5.d2, - ); - - tempvar x_of_z_w6 = UnreducedBigInt5( - d0=x.w6.d0 * z_pow1_11.z_6.d0, - d1=x.w6.d0 * z_pow1_11.z_6.d1 + x.w6.d1 * z_pow1_11.z_6.d0, - d2=x.w6.d0 * z_pow1_11.z_6.d2 + x.w6.d1 * z_pow1_11.z_6.d1 + x.w6.d2 * z_pow1_11.z_6.d0, - d3=x.w6.d1 * z_pow1_11.z_6.d2 + x.w6.d2 * z_pow1_11.z_6.d1, - d4=x.w6.d2 * z_pow1_11.z_6.d2, - ); - - tempvar x_of_z_w7 = UnreducedBigInt5( - d0=x.w7.d0 * z_pow1_11.z_7.d0, - d1=x.w7.d0 * z_pow1_11.z_7.d1 + x.w7.d1 * z_pow1_11.z_7.d0, - d2=x.w7.d0 * z_pow1_11.z_7.d2 + x.w7.d1 * z_pow1_11.z_7.d1 + x.w7.d2 * z_pow1_11.z_7.d0, - d3=x.w7.d1 * z_pow1_11.z_7.d2 + x.w7.d2 * z_pow1_11.z_7.d1, - d4=x.w7.d2 * z_pow1_11.z_7.d2, - ); - - tempvar x_of_z_w8 = UnreducedBigInt5( - d0=x.w8.d0 * z_pow1_11.z_8.d0, - d1=x.w8.d0 * z_pow1_11.z_8.d1 + x.w8.d1 * z_pow1_11.z_8.d0, - d2=x.w8.d0 * z_pow1_11.z_8.d2 + x.w8.d1 * z_pow1_11.z_8.d1 + x.w8.d2 * z_pow1_11.z_8.d0, - d3=x.w8.d1 * z_pow1_11.z_8.d2 + x.w8.d2 * z_pow1_11.z_8.d1, - d4=x.w8.d2 * z_pow1_11.z_8.d2, - ); - - tempvar x_of_z_w9 = UnreducedBigInt5( - d0=x.w9.d0 * z_pow1_11.z_9.d0, - d1=x.w9.d0 * z_pow1_11.z_9.d1 + x.w9.d1 * z_pow1_11.z_9.d0, - d2=x.w9.d0 * z_pow1_11.z_9.d2 + x.w9.d1 * z_pow1_11.z_9.d1 + x.w9.d2 * z_pow1_11.z_9.d0, - d3=x.w9.d1 * z_pow1_11.z_9.d2 + x.w9.d2 * z_pow1_11.z_9.d1, - d4=x.w9.d2 * z_pow1_11.z_9.d2, - ); - - tempvar x_of_z_w10 = UnreducedBigInt5( - d0=x.w10.d0 * z_pow1_11.z_10.d0, - d1=x.w10.d0 * z_pow1_11.z_10.d1 + x.w10.d1 * z_pow1_11.z_10.d0, - d2=x.w10.d0 * z_pow1_11.z_10.d2 + x.w10.d1 * z_pow1_11.z_10.d1 + x.w10.d2 * - z_pow1_11.z_10.d0, - d3=x.w10.d1 * z_pow1_11.z_10.d2 + x.w10.d2 * z_pow1_11.z_10.d1, - d4=x.w10.d2 * z_pow1_11.z_10.d2, - ); - - tempvar x_of_z_w11 = UnreducedBigInt5( - d0=x.w11.d0 * z_pow1_11.z_11.d0, - d1=x.w11.d0 * z_pow1_11.z_11.d1 + x.w11.d1 * z_pow1_11.z_11.d0, - d2=x.w11.d0 * z_pow1_11.z_11.d2 + x.w11.d1 * z_pow1_11.z_11.d1 + x.w11.d2 * - z_pow1_11.z_11.d0, - d3=x.w11.d1 * z_pow1_11.z_11.d2 + x.w11.d2 * z_pow1_11.z_11.d1, - d4=x.w11.d2 * z_pow1_11.z_11.d2, - ); - - let x_of_z = reduce_5( - UnreducedBigInt5( - d0=x.w0.d0 + x_of_z_w1.d0 + x_of_z_w2.d0 + x_of_z_w3.d0 + x_of_z_w4.d0 + - x_of_z_w5.d0 + x_of_z_w6.d0 + x_of_z_w7.d0 + x_of_z_w8.d0 + x_of_z_w9.d0 + - x_of_z_w10.d0 + x_of_z_w11.d0, - d1=x.w0.d1 + x_of_z_w1.d1 + x_of_z_w2.d1 + 
x_of_z_w3.d1 + x_of_z_w4.d1 + - x_of_z_w5.d1 + x_of_z_w6.d1 + x_of_z_w7.d1 + x_of_z_w8.d1 + x_of_z_w9.d1 + - x_of_z_w10.d1 + x_of_z_w11.d1, - d2=x.w0.d2 + x_of_z_w1.d2 + x_of_z_w2.d2 + x_of_z_w3.d2 + x_of_z_w4.d2 + - x_of_z_w5.d2 + x_of_z_w6.d2 + x_of_z_w7.d2 + x_of_z_w8.d2 + x_of_z_w9.d2 + - x_of_z_w10.d2 + x_of_z_w11.d2, - d3=x_of_z_w1.d3 + x_of_z_w2.d3 + x_of_z_w3.d3 + x_of_z_w4.d3 + x_of_z_w5.d3 + - x_of_z_w6.d3 + x_of_z_w7.d3 + x_of_z_w8.d3 + x_of_z_w9.d3 + x_of_z_w10.d3 + - x_of_z_w11.d3, - d4=x_of_z_w1.d4 + x_of_z_w2.d4 + x_of_z_w3.d4 + x_of_z_w4.d4 + x_of_z_w5.d4 + - x_of_z_w6.d4 + x_of_z_w7.d4 + x_of_z_w8.d4 + x_of_z_w9.d4 + x_of_z_w10.d4 + - x_of_z_w11.d4, - ), - ); - - tempvar y_of_z_w1 = UnreducedBigInt5( - d0=y.w1.d0 * z_pow1_11.z_1.d0, - d1=y.w1.d0 * z_pow1_11.z_1.d1 + y.w1.d1 * z_pow1_11.z_1.d0, - d2=y.w1.d0 * z_pow1_11.z_1.d2 + y.w1.d1 * z_pow1_11.z_1.d1 + y.w1.d2 * z_pow1_11.z_1.d0, - d3=y.w1.d1 * z_pow1_11.z_1.d2 + y.w1.d2 * z_pow1_11.z_1.d1, - d4=y.w1.d2 * z_pow1_11.z_1.d2, - ); - tempvar y_of_z_w2 = UnreducedBigInt5( - d0=y.w2.d0 * z_pow1_11.z_2.d0, - d1=y.w2.d0 * z_pow1_11.z_2.d1 + y.w2.d1 * z_pow1_11.z_2.d0, - d2=y.w2.d0 * z_pow1_11.z_2.d2 + y.w2.d1 * z_pow1_11.z_2.d1 + y.w2.d2 * z_pow1_11.z_2.d0, - d3=y.w2.d1 * z_pow1_11.z_2.d2 + y.w2.d2 * z_pow1_11.z_2.d1, - d4=y.w2.d2 * z_pow1_11.z_2.d2, - ); - tempvar y_of_z_w3 = UnreducedBigInt5( - d0=y.w3.d0 * z_pow1_11.z_3.d0, - d1=y.w3.d0 * z_pow1_11.z_3.d1 + y.w3.d1 * z_pow1_11.z_3.d0, - d2=y.w3.d0 * z_pow1_11.z_3.d2 + y.w3.d1 * z_pow1_11.z_3.d1 + y.w3.d2 * z_pow1_11.z_3.d0, - d3=y.w3.d1 * z_pow1_11.z_3.d2 + y.w3.d2 * z_pow1_11.z_3.d1, - d4=y.w3.d2 * z_pow1_11.z_3.d2, - ); - tempvar y_of_z_w4 = UnreducedBigInt5( - d0=y.w4.d0 * z_pow1_11.z_4.d0, - d1=y.w4.d0 * z_pow1_11.z_4.d1 + y.w4.d1 * z_pow1_11.z_4.d0, - d2=y.w4.d0 * z_pow1_11.z_4.d2 + y.w4.d1 * z_pow1_11.z_4.d1 + y.w4.d2 * z_pow1_11.z_4.d0, - d3=y.w4.d1 * z_pow1_11.z_4.d2 + y.w4.d2 * z_pow1_11.z_4.d1, - d4=y.w4.d2 * z_pow1_11.z_4.d2, - ); - tempvar y_of_z_w5 = UnreducedBigInt5( - d0=y.w5.d0 * z_pow1_11.z_5.d0, - d1=y.w5.d0 * z_pow1_11.z_5.d1 + y.w5.d1 * z_pow1_11.z_5.d0, - d2=y.w5.d0 * z_pow1_11.z_5.d2 + y.w5.d1 * z_pow1_11.z_5.d1 + y.w5.d2 * z_pow1_11.z_5.d0, - d3=y.w5.d1 * z_pow1_11.z_5.d2 + y.w5.d2 * z_pow1_11.z_5.d1, - d4=y.w5.d2 * z_pow1_11.z_5.d2, - ); - - tempvar y_of_z_w6 = UnreducedBigInt5( - d0=y.w6.d0 * z_pow1_11.z_6.d0, - d1=y.w6.d0 * z_pow1_11.z_6.d1 + y.w6.d1 * z_pow1_11.z_6.d0, - d2=y.w6.d0 * z_pow1_11.z_6.d2 + y.w6.d1 * z_pow1_11.z_6.d1 + y.w6.d2 * z_pow1_11.z_6.d0, - d3=y.w6.d1 * z_pow1_11.z_6.d2 + y.w6.d2 * z_pow1_11.z_6.d1, - d4=y.w6.d2 * z_pow1_11.z_6.d2, - ); - - tempvar y_of_z_w7 = UnreducedBigInt5( - d0=y.w7.d0 * z_pow1_11.z_7.d0, - d1=y.w7.d0 * z_pow1_11.z_7.d1 + y.w7.d1 * z_pow1_11.z_7.d0, - d2=y.w7.d0 * z_pow1_11.z_7.d2 + y.w7.d1 * z_pow1_11.z_7.d1 + y.w7.d2 * z_pow1_11.z_7.d0, - d3=y.w7.d1 * z_pow1_11.z_7.d2 + y.w7.d2 * z_pow1_11.z_7.d1, - d4=y.w7.d2 * z_pow1_11.z_7.d2, - ); - tempvar y_of_z_w8 = UnreducedBigInt5( - d0=y.w8.d0 * z_pow1_11.z_8.d0, - d1=y.w8.d0 * z_pow1_11.z_8.d1 + y.w8.d1 * z_pow1_11.z_8.d0, - d2=y.w8.d0 * z_pow1_11.z_8.d2 + y.w8.d1 * z_pow1_11.z_8.d1 + y.w8.d2 * z_pow1_11.z_8.d0, - d3=y.w8.d1 * z_pow1_11.z_8.d2 + y.w8.d2 * z_pow1_11.z_8.d1, - d4=y.w8.d2 * z_pow1_11.z_8.d2, - ); - tempvar y_of_z_w9 = UnreducedBigInt5( - d0=y.w9.d0 * z_pow1_11.z_9.d0, - d1=y.w9.d0 * z_pow1_11.z_9.d1 + y.w9.d1 * z_pow1_11.z_9.d0, - d2=y.w9.d0 * z_pow1_11.z_9.d2 + y.w9.d1 * z_pow1_11.z_9.d1 + y.w9.d2 * z_pow1_11.z_9.d0, - d3=y.w9.d1 * 
z_pow1_11.z_9.d2 + y.w9.d2 * z_pow1_11.z_9.d1, - d4=y.w9.d2 * z_pow1_11.z_9.d2, - ); - tempvar y_of_z_w10 = UnreducedBigInt5( - d0=y.w10.d0 * z_pow1_11.z_10.d0, - d1=y.w10.d0 * z_pow1_11.z_10.d1 + y.w10.d1 * z_pow1_11.z_10.d0, - d2=y.w10.d0 * z_pow1_11.z_10.d2 + y.w10.d1 * z_pow1_11.z_10.d1 + y.w10.d2 * - z_pow1_11.z_10.d0, - d3=y.w10.d1 * z_pow1_11.z_10.d2 + y.w10.d2 * z_pow1_11.z_10.d1, - d4=y.w10.d2 * z_pow1_11.z_10.d2, - ); - tempvar y_of_z_w11 = UnreducedBigInt5( - d0=y.w11.d0 * z_pow1_11.z_11.d0, - d1=y.w11.d0 * z_pow1_11.z_11.d1 + y.w11.d1 * z_pow1_11.z_11.d0, - d2=y.w11.d0 * z_pow1_11.z_11.d2 + y.w11.d1 * z_pow1_11.z_11.d1 + y.w11.d2 * - z_pow1_11.z_11.d0, - d3=y.w11.d1 * z_pow1_11.z_11.d2 + y.w11.d2 * z_pow1_11.z_11.d1, - d4=y.w11.d2 * z_pow1_11.z_11.d2, - ); - let y_of_z = reduce_5( - UnreducedBigInt5( - d0=y.w0.d0 + y_of_z_w1.d0 + y_of_z_w2.d0 + y_of_z_w3.d0 + y_of_z_w4.d0 + - y_of_z_w5.d0 + y_of_z_w6.d0 + y_of_z_w7.d0 + y_of_z_w8.d0 + y_of_z_w9.d0 + - y_of_z_w10.d0 + y_of_z_w11.d0, - d1=y.w0.d1 + y_of_z_w1.d1 + y_of_z_w2.d1 + y_of_z_w3.d1 + y_of_z_w4.d1 + - y_of_z_w5.d1 + y_of_z_w6.d1 + y_of_z_w7.d1 + y_of_z_w8.d1 + y_of_z_w9.d1 + - y_of_z_w10.d1 + y_of_z_w11.d1, - d2=y.w0.d2 + y_of_z_w1.d2 + y_of_z_w2.d2 + y_of_z_w3.d2 + y_of_z_w4.d2 + - y_of_z_w5.d2 + y_of_z_w6.d2 + y_of_z_w7.d2 + y_of_z_w8.d2 + y_of_z_w9.d2 + - y_of_z_w10.d2 + y_of_z_w11.d2, - d3=y_of_z_w1.d3 + y_of_z_w2.d3 + y_of_z_w3.d3 + y_of_z_w4.d3 + y_of_z_w5.d3 + - y_of_z_w6.d3 + y_of_z_w7.d3 + y_of_z_w8.d3 + y_of_z_w9.d3 + y_of_z_w10.d3 + - y_of_z_w11.d3, - d4=y_of_z_w1.d4 + y_of_z_w2.d4 + y_of_z_w3.d4 + y_of_z_w4.d4 + y_of_z_w5.d4 + - y_of_z_w6.d4 + y_of_z_w7.d4 + y_of_z_w8.d4 + y_of_z_w9.d4 + y_of_z_w10.d4 + - y_of_z_w11.d4, - ), - ); - - tempvar xy: UnreducedBigInt5 = UnreducedBigInt5( - d0=x_of_z.d0 * y_of_z.d0, - d1=x_of_z.d0 * y_of_z.d1 + x_of_z.d1 * y_of_z.d0, - d2=x_of_z.d0 * y_of_z.d2 + x_of_z.d1 * y_of_z.d1 + x_of_z.d2 * y_of_z.d0, - d3=x_of_z.d1 * y_of_z.d2 + x_of_z.d2 * y_of_z.d1, - d4=x_of_z.d2 * y_of_z.d2, - ); - - tempvar q_of_z_w1 = UnreducedBigInt5( - d0=q_w.w1.d0 * z_pow1_11.z_1.d0, - d1=q_w.w1.d0 * z_pow1_11.z_1.d1 + q_w.w1.d1 * z_pow1_11.z_1.d0, - d2=q_w.w1.d0 * z_pow1_11.z_1.d2 + q_w.w1.d1 * z_pow1_11.z_1.d1 + q_w.w1.d2 * - z_pow1_11.z_1.d0, - d3=q_w.w1.d1 * z_pow1_11.z_1.d2 + q_w.w1.d2 * z_pow1_11.z_1.d1, - d4=q_w.w1.d2 * z_pow1_11.z_1.d2, - ); - - tempvar q_of_z_w2 = UnreducedBigInt5( - d0=q_w.w2.d0 * z_pow1_11.z_2.d0, - d1=q_w.w2.d0 * z_pow1_11.z_2.d1 + q_w.w2.d1 * z_pow1_11.z_2.d0, - d2=q_w.w2.d0 * z_pow1_11.z_2.d2 + q_w.w2.d1 * z_pow1_11.z_2.d1 + q_w.w2.d2 * - z_pow1_11.z_2.d0, - d3=q_w.w2.d1 * z_pow1_11.z_2.d2 + q_w.w2.d2 * z_pow1_11.z_2.d1, - d4=q_w.w2.d2 * z_pow1_11.z_2.d2, - ); - - tempvar q_of_z_w3 = UnreducedBigInt5( - d0=q_w.w3.d0 * z_pow1_11.z_3.d0, - d1=q_w.w3.d0 * z_pow1_11.z_3.d1 + q_w.w3.d1 * z_pow1_11.z_3.d0, - d2=q_w.w3.d0 * z_pow1_11.z_3.d2 + q_w.w3.d1 * z_pow1_11.z_3.d1 + q_w.w3.d2 * - z_pow1_11.z_3.d0, - d3=q_w.w3.d1 * z_pow1_11.z_3.d2 + q_w.w3.d2 * z_pow1_11.z_3.d1, - d4=q_w.w3.d2 * z_pow1_11.z_3.d2, - ); - - tempvar q_of_z_w4 = UnreducedBigInt5( - d0=q_w.w4.d0 * z_pow1_11.z_4.d0, - d1=q_w.w4.d0 * z_pow1_11.z_4.d1 + q_w.w4.d1 * z_pow1_11.z_4.d0, - d2=q_w.w4.d0 * z_pow1_11.z_4.d2 + q_w.w4.d1 * z_pow1_11.z_4.d1 + q_w.w4.d2 * - z_pow1_11.z_4.d0, - d3=q_w.w4.d1 * z_pow1_11.z_4.d2 + q_w.w4.d2 * z_pow1_11.z_4.d1, - d4=q_w.w4.d2 * z_pow1_11.z_4.d2, - ); - - tempvar q_of_z_w5 = UnreducedBigInt5( - d0=q_w.w5.d0 * z_pow1_11.z_5.d0, - d1=q_w.w5.d0 * z_pow1_11.z_5.d1 + q_w.w5.d1 * 
z_pow1_11.z_5.d0, - d2=q_w.w5.d0 * z_pow1_11.z_5.d2 + q_w.w5.d1 * z_pow1_11.z_5.d1 + q_w.w5.d2 * - z_pow1_11.z_5.d0, - d3=q_w.w5.d1 * z_pow1_11.z_5.d2 + q_w.w5.d2 * z_pow1_11.z_5.d1, - d4=q_w.w5.d2 * z_pow1_11.z_5.d2, - ); - - tempvar q_of_z_w6 = UnreducedBigInt5( - d0=q_w.w6.d0 * z_pow1_11.z_6.d0, - d1=q_w.w6.d0 * z_pow1_11.z_6.d1 + q_w.w6.d1 * z_pow1_11.z_6.d0, - d2=q_w.w6.d0 * z_pow1_11.z_6.d2 + q_w.w6.d1 * z_pow1_11.z_6.d1 + q_w.w6.d2 * - z_pow1_11.z_6.d0, - d3=q_w.w6.d1 * z_pow1_11.z_6.d2 + q_w.w6.d2 * z_pow1_11.z_6.d1, - d4=q_w.w6.d2 * z_pow1_11.z_6.d2, - ); - - tempvar q_of_z_w7 = UnreducedBigInt5( - d0=q_w.w7.d0 * z_pow1_11.z_7.d0, - d1=q_w.w7.d0 * z_pow1_11.z_7.d1 + q_w.w7.d1 * z_pow1_11.z_7.d0, - d2=q_w.w7.d0 * z_pow1_11.z_7.d2 + q_w.w7.d1 * z_pow1_11.z_7.d1 + q_w.w7.d2 * - z_pow1_11.z_7.d0, - d3=q_w.w7.d1 * z_pow1_11.z_7.d2 + q_w.w7.d2 * z_pow1_11.z_7.d1, - d4=q_w.w7.d2 * z_pow1_11.z_7.d2, - ); - - tempvar q_of_z_w8 = UnreducedBigInt5( - d0=q_w.w8.d0 * z_pow1_11.z_8.d0, - d1=q_w.w8.d0 * z_pow1_11.z_8.d1 + q_w.w8.d1 * z_pow1_11.z_8.d0, - d2=q_w.w8.d0 * z_pow1_11.z_8.d2 + q_w.w8.d1 * z_pow1_11.z_8.d1 + q_w.w8.d2 * - z_pow1_11.z_8.d0, - d3=q_w.w8.d1 * z_pow1_11.z_8.d2 + q_w.w8.d2 * z_pow1_11.z_8.d1, - d4=q_w.w8.d2 * z_pow1_11.z_8.d2, - ); - - tempvar q_of_z_w9 = UnreducedBigInt5( - d0=q_w.w9.d0 * z_pow1_11.z_9.d0, - d1=q_w.w9.d0 * z_pow1_11.z_9.d1 + q_w.w9.d1 * z_pow1_11.z_9.d0, - d2=q_w.w9.d0 * z_pow1_11.z_9.d2 + q_w.w9.d1 * z_pow1_11.z_9.d1 + q_w.w9.d2 * - z_pow1_11.z_9.d0, - d3=q_w.w9.d1 * z_pow1_11.z_9.d2 + q_w.w9.d2 * z_pow1_11.z_9.d1, - d4=q_w.w9.d2 * z_pow1_11.z_9.d2, - ); - - tempvar q_of_z_w10 = UnreducedBigInt5( - d0=q_w.w10.d0 * z_pow1_11.z_10.d0, - d1=q_w.w10.d0 * z_pow1_11.z_10.d1 + q_w.w10.d1 * z_pow1_11.z_10.d0, - d2=q_w.w10.d0 * z_pow1_11.z_10.d2 + q_w.w10.d1 * z_pow1_11.z_10.d1 + q_w.w10.d2 * - z_pow1_11.z_10.d0, - d3=q_w.w10.d1 * z_pow1_11.z_10.d2 + q_w.w10.d2 * z_pow1_11.z_10.d1, - d4=q_w.w10.d2 * z_pow1_11.z_10.d2, - ); - - let q_of_z = reduce_5( - UnreducedBigInt5( - d0=q_w.w0.d0 + q_of_z_w1.d0 + q_of_z_w2.d0 + q_of_z_w3.d0 + q_of_z_w4.d0 + - q_of_z_w5.d0 + q_of_z_w6.d0 + q_of_z_w7.d0 + q_of_z_w8.d0 + q_of_z_w9.d0 + - q_of_z_w10.d0, - d1=q_w.w0.d1 + q_of_z_w1.d1 + q_of_z_w2.d1 + q_of_z_w3.d1 + q_of_z_w4.d1 + - q_of_z_w5.d1 + q_of_z_w6.d1 + q_of_z_w7.d1 + q_of_z_w8.d1 + q_of_z_w9.d1 + - q_of_z_w10.d1, - d2=q_w.w0.d2 + q_of_z_w1.d2 + q_of_z_w2.d2 + q_of_z_w3.d2 + q_of_z_w4.d2 + - q_of_z_w5.d2 + q_of_z_w6.d2 + q_of_z_w7.d2 + q_of_z_w8.d2 + q_of_z_w9.d2 + - q_of_z_w10.d2, - d3=q_of_z_w1.d3 + q_of_z_w2.d3 + q_of_z_w3.d3 + q_of_z_w4.d3 + q_of_z_w5.d3 + - q_of_z_w6.d3 + q_of_z_w7.d3 + q_of_z_w8.d3 + q_of_z_w9.d3 + q_of_z_w10.d3, - d4=q_of_z_w1.d4 + q_of_z_w2.d4 + q_of_z_w3.d4 + q_of_z_w4.d4 + q_of_z_w5.d4 + - q_of_z_w6.d4 + q_of_z_w7.d4 + q_of_z_w8.d4 + q_of_z_w9.d4 + q_of_z_w10.d4, - ), - ); - let z_12 = fq_bigint3.mul(z_pow1_11.z_1, z_pow1_11.z_11); - let p_of_z = eval_irreducible_poly12(z_pow1_11.z_6, z_12); - - tempvar q_p_of_z = UnreducedBigInt5( - d0=q_of_z.d0 * p_of_z.d0, - d1=q_of_z.d0 * p_of_z.d1 + q_of_z.d1 * p_of_z.d0, - d2=q_of_z.d0 * p_of_z.d2 + q_of_z.d1 * p_of_z.d1 + q_of_z.d2 * p_of_z.d0, - d3=q_of_z.d1 * p_of_z.d2 + q_of_z.d2 * p_of_z.d1, - d4=q_of_z.d2 * p_of_z.d2, - ); - - tempvar r_of_z_w1: UnreducedBigInt5 = UnreducedBigInt5( - d0=r_w.w1.d0 * z_pow1_11.z_1.d0, - d1=r_w.w1.d0 * z_pow1_11.z_1.d1 + r_w.w1.d1 * z_pow1_11.z_1.d0, - d2=r_w.w1.d0 * z_pow1_11.z_1.d2 + r_w.w1.d1 * z_pow1_11.z_1.d1 + r_w.w1.d2 * - z_pow1_11.z_1.d0, - d3=r_w.w1.d1 * 
z_pow1_11.z_1.d2 + r_w.w1.d2 * z_pow1_11.z_1.d1, - d4=r_w.w1.d2 * z_pow1_11.z_1.d2, - ); - - tempvar r_of_z_w2: UnreducedBigInt5 = UnreducedBigInt5( - d0=r_w.w2.d0 * z_pow1_11.z_2.d0, - d1=r_w.w2.d0 * z_pow1_11.z_2.d1 + r_w.w2.d1 * z_pow1_11.z_2.d0, - d2=r_w.w2.d0 * z_pow1_11.z_2.d2 + r_w.w2.d1 * z_pow1_11.z_2.d1 + r_w.w2.d2 * - z_pow1_11.z_2.d0, - d3=r_w.w2.d1 * z_pow1_11.z_2.d2 + r_w.w2.d2 * z_pow1_11.z_2.d1, - d4=r_w.w2.d2 * z_pow1_11.z_2.d2, - ); - - tempvar r_of_z_w3: UnreducedBigInt5 = UnreducedBigInt5( - d0=r_w.w3.d0 * z_pow1_11.z_3.d0, - d1=r_w.w3.d0 * z_pow1_11.z_3.d1 + r_w.w3.d1 * z_pow1_11.z_3.d0, - d2=r_w.w3.d0 * z_pow1_11.z_3.d2 + r_w.w3.d1 * z_pow1_11.z_3.d1 + r_w.w3.d2 * - z_pow1_11.z_3.d0, - d3=r_w.w3.d1 * z_pow1_11.z_3.d2 + r_w.w3.d2 * z_pow1_11.z_3.d1, - d4=r_w.w3.d2 * z_pow1_11.z_3.d2, - ); - - tempvar r_of_z_w4: UnreducedBigInt5 = UnreducedBigInt5( - d0=r_w.w4.d0 * z_pow1_11.z_4.d0, - d1=r_w.w4.d0 * z_pow1_11.z_4.d1 + r_w.w4.d1 * z_pow1_11.z_4.d0, - d2=r_w.w4.d0 * z_pow1_11.z_4.d2 + r_w.w4.d1 * z_pow1_11.z_4.d1 + r_w.w4.d2 * - z_pow1_11.z_4.d0, - d3=r_w.w4.d1 * z_pow1_11.z_4.d2 + r_w.w4.d2 * z_pow1_11.z_4.d1, - d4=r_w.w4.d2 * z_pow1_11.z_4.d2, - ); - - tempvar r_of_z_w5: UnreducedBigInt5 = UnreducedBigInt5( - d0=r_w.w5.d0 * z_pow1_11.z_5.d0, - d1=r_w.w5.d0 * z_pow1_11.z_5.d1 + r_w.w5.d1 * z_pow1_11.z_5.d0, - d2=r_w.w5.d0 * z_pow1_11.z_5.d2 + r_w.w5.d1 * z_pow1_11.z_5.d1 + r_w.w5.d2 * - z_pow1_11.z_5.d0, - d3=r_w.w5.d1 * z_pow1_11.z_5.d2 + r_w.w5.d2 * z_pow1_11.z_5.d1, - d4=r_w.w5.d2 * z_pow1_11.z_5.d2, - ); - - tempvar r_of_z_w6: UnreducedBigInt5 = UnreducedBigInt5( - d0=r_w.w6.d0 * z_pow1_11.z_6.d0, - d1=r_w.w6.d0 * z_pow1_11.z_6.d1 + r_w.w6.d1 * z_pow1_11.z_6.d0, - d2=r_w.w6.d0 * z_pow1_11.z_6.d2 + r_w.w6.d1 * z_pow1_11.z_6.d1 + r_w.w6.d2 * - z_pow1_11.z_6.d0, - d3=r_w.w6.d1 * z_pow1_11.z_6.d2 + r_w.w6.d2 * z_pow1_11.z_6.d1, - d4=r_w.w6.d2 * z_pow1_11.z_6.d2, - ); - - tempvar r_of_z_w7: UnreducedBigInt5 = UnreducedBigInt5( - d0=r_w.w7.d0 * z_pow1_11.z_7.d0, - d1=r_w.w7.d0 * z_pow1_11.z_7.d1 + r_w.w7.d1 * z_pow1_11.z_7.d0, - d2=r_w.w7.d0 * z_pow1_11.z_7.d2 + r_w.w7.d1 * z_pow1_11.z_7.d1 + r_w.w7.d2 * - z_pow1_11.z_7.d0, - d3=r_w.w7.d1 * z_pow1_11.z_7.d2 + r_w.w7.d2 * z_pow1_11.z_7.d1, - d4=r_w.w7.d2 * z_pow1_11.z_7.d2, - ); - - tempvar r_of_z_w8: UnreducedBigInt5 = UnreducedBigInt5( - d0=r_w.w8.d0 * z_pow1_11.z_8.d0, - d1=r_w.w8.d0 * z_pow1_11.z_8.d1 + r_w.w8.d1 * z_pow1_11.z_8.d0, - d2=r_w.w8.d0 * z_pow1_11.z_8.d2 + r_w.w8.d1 * z_pow1_11.z_8.d1 + r_w.w8.d2 * - z_pow1_11.z_8.d0, - d3=r_w.w8.d1 * z_pow1_11.z_8.d2 + r_w.w8.d2 * z_pow1_11.z_8.d1, - d4=r_w.w8.d2 * z_pow1_11.z_8.d2, - ); - - tempvar r_of_z_w9: UnreducedBigInt5 = UnreducedBigInt5( - d0=r_w.w9.d0 * z_pow1_11.z_9.d0, - d1=r_w.w9.d0 * z_pow1_11.z_9.d1 + r_w.w9.d1 * z_pow1_11.z_9.d0, - d2=r_w.w9.d0 * z_pow1_11.z_9.d2 + r_w.w9.d1 * z_pow1_11.z_9.d1 + r_w.w9.d2 * - z_pow1_11.z_9.d0, - d3=r_w.w9.d1 * z_pow1_11.z_9.d2 + r_w.w9.d2 * z_pow1_11.z_9.d1, - d4=r_w.w9.d2 * z_pow1_11.z_9.d2, - ); - - tempvar r_of_z_w10: UnreducedBigInt5 = UnreducedBigInt5( - d0=r_w.w10.d0 * z_pow1_11.z_10.d0, - d1=r_w.w10.d0 * z_pow1_11.z_10.d1 + r_w.w10.d1 * z_pow1_11.z_10.d0, - d2=r_w.w10.d0 * z_pow1_11.z_10.d2 + r_w.w10.d1 * z_pow1_11.z_10.d1 + r_w.w10.d2 * - z_pow1_11.z_10.d0, - d3=r_w.w10.d1 * z_pow1_11.z_10.d2 + r_w.w10.d2 * z_pow1_11.z_10.d1, - d4=r_w.w10.d2 * z_pow1_11.z_10.d2, - ); - - tempvar r_of_z_w11: UnreducedBigInt5 = UnreducedBigInt5( - d0=r_w.w11.d0 * z_pow1_11.z_11.d0, - d1=r_w.w11.d0 * z_pow1_11.z_11.d1 + r_w.w11.d1 * 
z_pow1_11.z_11.d0, - d2=r_w.w11.d0 * z_pow1_11.z_11.d2 + r_w.w11.d1 * z_pow1_11.z_11.d1 + r_w.w11.d2 * - z_pow1_11.z_11.d0, - d3=r_w.w11.d1 * z_pow1_11.z_11.d2 + r_w.w11.d2 * z_pow1_11.z_11.d1, - d4=r_w.w11.d2 * z_pow1_11.z_11.d2, - ); - - tempvar r_of_z: UnreducedBigInt5 = UnreducedBigInt5( - d0=r_w.w0.d0 + r_of_z_w1.d0 + r_of_z_w2.d0 + r_of_z_w3.d0 + r_of_z_w4.d0 + - r_of_z_w5.d0 + r_of_z_w6.d0 + r_of_z_w7.d0 + r_of_z_w8.d0 + r_of_z_w9.d0 + - r_of_z_w10.d0 + r_of_z_w11.d0, - d1=r_w.w0.d1 + r_of_z_w1.d1 + r_of_z_w2.d1 + r_of_z_w3.d1 + r_of_z_w4.d1 + - r_of_z_w5.d1 + r_of_z_w6.d1 + r_of_z_w7.d1 + r_of_z_w8.d1 + r_of_z_w9.d1 + - r_of_z_w10.d1 + r_of_z_w11.d1, - d2=r_w.w0.d2 + r_of_z_w1.d2 + r_of_z_w2.d2 + r_of_z_w3.d2 + r_of_z_w4.d2 + - r_of_z_w5.d2 + r_of_z_w6.d2 + r_of_z_w7.d2 + r_of_z_w8.d2 + r_of_z_w9.d2 + - r_of_z_w10.d2 + r_of_z_w11.d2, - d3=r_of_z_w1.d3 + r_of_z_w2.d3 + r_of_z_w3.d3 + r_of_z_w4.d3 + r_of_z_w5.d3 + - r_of_z_w6.d3 + r_of_z_w7.d3 + r_of_z_w8.d3 + r_of_z_w9.d3 + r_of_z_w10.d3 + - r_of_z_w11.d3, - d4=r_of_z_w1.d4 + r_of_z_w2.d4 + r_of_z_w3.d4 + r_of_z_w4.d4 + r_of_z_w5.d4 + - r_of_z_w6.d4 + r_of_z_w7.d4 + r_of_z_w8.d4 + r_of_z_w9.d4 + r_of_z_w10.d4 + - r_of_z_w11.d4, - ); - - verify_zero5( - UnreducedBigInt5( - d0=xy.d0 - q_p_of_z.d0 - r_of_z.d0, - d1=xy.d1 - q_p_of_z.d1 - r_of_z.d1, - d2=xy.d2 - q_p_of_z.d2 - r_of_z.d2, - d3=xy.d3 - q_p_of_z.d3 - r_of_z.d3, - d4=xy.d4 - q_p_of_z.d4 - r_of_z.d4, - ), - ); - - return &r_w; - } - func conjugate{range_check_ptr}(x: E12*) -> E12* { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - let c1 = e6.neg(x.c1); - local res: E12 = E12(x.c0, c1); - return &res; - } - // Adds two E12 elements - func add{range_check_ptr}(x: E12*, y: E12*) -> E12* { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - let c0 = e6.add(x.c0, y.c0); - let c1 = e6.add(x.c1, y.c1); - local res: E12 = E12(c0, c1); - return &res; - } - - // Subtracts two E12 elements - func sub{range_check_ptr}(x: E12*, y: E12*) -> E12* { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - let c0 = e6.sub(x.c0, y.c0); - let c1 = e6.sub(x.c1, y.c1); - local res: E12 = E12(c0, c1); - return &res; - } - - func div_full{range_check_ptr, poseidon_ptr: PoseidonBuiltin*}(x: E12D*, y: E12D*) -> E12D* { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - local div: E12D; - - %{ - from starkware.cairo.common.math_utils import as_int - from tools.py.extension_trick import inv_e12, mul_e12, pack_e12, flatten, w_to_gnark, gnark_to_w - assert 1 < ids.N_LIMBS <= 12 - p, x, y=0, 12*[0], 12*[0] - for i in range(ids.N_LIMBS): - for k in range(12): - x[k]+=as_int(getattr(getattr(ids.x, f'w{k}'), f'd{i}'), PRIME) * ids.BASE**i - y[k]+=as_int(getattr(getattr(ids.y, f'w{k}'), f'd{i}'), PRIME) * ids.BASE**i - - p+=getattr(ids, 'P'+str(i)) * ids.BASE**i - - x = w_to_gnark(x) - y = w_to_gnark(y) - y_inv = inv_e12(*pack_e12(y)) - - x_over_y = mul_e12(pack_e12(x), pack_e12(y_inv)) - assert x == flatten(mul_e12(pack_e12(y), x_over_y)) - x_over_y_full = gnark_to_w(flatten(x_over_y)) - div = [split(wi) for wi in x_over_y_full] - for i in range(12): - for l in range(ids.N_LIMBS): - setattr(getattr(ids.div,f'w{i}'),f'd{l}',div[i][l]) - %} - assert_reduced_E12D(div); - // Computes y * (x/y) = x - let check = e12.mul_trick_pure(y, &div); - assert_E12D(x, check); - return ÷ - } - - func is_zero{range_check_ptr}(x: E12*) -> felt { - let c0_is_zero = e6.is_zero(x.c0); - if (c0_is_zero == 0) { - return 0; - } - - let c1_is_zero = e6.is_zero(x.c1); - return c1_is_zero; - } - func zero{}() -> E12* { - let 
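The verify_zero5 call above is the core of the multiplication check: the hint supplies q and r such that x * y = q * p_irr + r as polynomials in w, and the Cairo code only verifies that identity at the evaluation point z (whose powers arrive in z_pow1_11), modulo the base field prime. A minimal sketch of that identity, assuming the BN254 base prime and the w^12 - 18*w^6 + 82 modulus evaluated by eval_irreducible_poly12; the helper names here are illustrative, not the repository's:

import random

p = 21888242871839275222246405745257275088696311157297823662689037894645226208583
# w^12 - 18*w^6 + 82, the modulus evaluated by eval_irreducible_poly12
p_irr = [82] + [0] * 5 + [-18] + [0] * 5 + [1]

def poly_mul(a, b):
    res = [0] * (len(a) + len(b) - 1)
    for i, ai in enumerate(a):
        for j, bj in enumerate(b):
            res[i + j] = (res[i + j] + ai * bj) % p
    return res

def poly_divmod(a, b):
    # synthetic division; remainder has len(b) - 1 = 12 coefficients (an E12D)
    a = a[:]
    q = [0] * (len(a) - len(b) + 1)
    inv_lead = pow(b[-1], -1, p)
    for i in range(len(q) - 1, -1, -1):
        q[i] = a[i + len(b) - 1] * inv_lead % p
        for j, bj in enumerate(b):
            a[i + j] = (a[i + j] - q[i] * bj) % p
    return q, [c % p for c in a[: len(b) - 1]]

def eval_poly(coeffs, z):
    return sum(c * pow(z, i, p) for i, c in enumerate(coeffs)) % p

def check_mul(x, y, z):
    q, r = poly_divmod(poly_mul(x, y), p_irr)            # what the hint provides
    lhs = eval_poly(x, z) * eval_poly(y, z) % p          # "xy" in the Cairo code
    rhs = (eval_poly(q, z) * eval_poly(p_irr, z) + eval_poly(r, z)) % p
    assert lhs == rhs                                    # verify_zero5(xy - q*p - r)
    return r

x = [random.randrange(p) for _ in range(12)]
y = [random.randrange(p) for _ in range(12)]
check_mul(x, y, z=random.randrange(p))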
c0 = e6.zero(); - let c1 = e6.zero(); - tempvar res = new E12(c0, c1); - return res; - } - func one{}() -> E12* { - let c0 = e6.one(); - let c1 = e6.zero(); - tempvar res = new E12(c0, c1); - return res; - } - func one_full() -> E12D* { - tempvar res = new E12D( - BigInt3(1, 0, 0), - BigInt3(0, 0, 0), - BigInt3(0, 0, 0), - BigInt3(0, 0, 0), - BigInt3(0, 0, 0), - BigInt3(0, 0, 0), - BigInt3(0, 0, 0), - BigInt3(0, 0, 0), - BigInt3(0, 0, 0), - BigInt3(0, 0, 0), - BigInt3(0, 0, 0), - BigInt3(0, 0, 0), - ); - return res; - } - func zero_full() -> E12D { - tempvar res = E12D( - BigInt3(0, 0, 0), - BigInt3(0, 0, 0), - BigInt3(0, 0, 0), - BigInt3(0, 0, 0), - BigInt3(0, 0, 0), - BigInt3(0, 0, 0), - BigInt3(0, 0, 0), - BigInt3(0, 0, 0), - BigInt3(0, 0, 0), - BigInt3(0, 0, 0), - BigInt3(0, 0, 0), - BigInt3(0, 0, 0), - ); - return res; - } - - func assert_E12(x: E12*, z: E12*) { - e6.assert_E6(x.c0, z.c0); - e6.assert_E6(x.c1, z.c1); - return (); - } - func assert_E12D(x: E12D*, y: E12D*) { - assert x.w0.d0 = y.w0.d0; - assert x.w0.d1 = y.w0.d1; - assert x.w0.d2 = y.w0.d2; - assert x.w1.d0 = y.w1.d0; - assert x.w1.d1 = y.w1.d1; - assert x.w1.d2 = y.w1.d2; - assert x.w2.d0 = y.w2.d0; - assert x.w2.d1 = y.w2.d1; - assert x.w2.d2 = y.w2.d2; - assert x.w3.d0 = y.w3.d0; - assert x.w3.d1 = y.w3.d1; - assert x.w3.d2 = y.w3.d2; - assert x.w4.d0 = y.w4.d0; - assert x.w4.d1 = y.w4.d1; - assert x.w4.d2 = y.w4.d2; - assert x.w5.d0 = y.w5.d0; - assert x.w5.d1 = y.w5.d1; - assert x.w5.d2 = y.w5.d2; - assert x.w6.d0 = y.w6.d0; - assert x.w6.d1 = y.w6.d1; - assert x.w6.d2 = y.w6.d2; - assert x.w7.d0 = y.w7.d0; - assert x.w7.d1 = y.w7.d1; - assert x.w7.d2 = y.w7.d2; - assert x.w8.d0 = y.w8.d0; - assert x.w8.d1 = y.w8.d1; - assert x.w8.d2 = y.w8.d2; - assert x.w9.d0 = y.w9.d0; - assert x.w9.d1 = y.w9.d1; - assert x.w9.d2 = y.w9.d2; - assert x.w10.d0 = y.w10.d0; - assert x.w10.d1 = y.w10.d1; - assert x.w10.d2 = y.w10.d2; - assert x.w11.d0 = y.w11.d0; - assert x.w11.d1 = y.w11.d1; - assert x.w11.d2 = y.w11.d2; - return (); - } -} - -func eval_irreducible_poly12{range_check_ptr}(z_6: BigInt3, z_12: BigInt3) -> BigInt3 { - alloc_locals; - local w6: BigInt3 = BigInt3( - 60193888514187762220203317, 27625954992973055882053025, 3656382694611191768777988 - ); // -18 % p - let (e6) = bigint_mul(w6, z_6); - - let res = reduce_5( - UnreducedBigInt5( - d0=82 + e6.d0 + z_12.d0, d1=e6.d1 + z_12.d1, d2=e6.d2 + z_12.d2, d3=e6.d3, d4=e6.d4 - ), - ); - return res; -} -func eval_E11{range_check_ptr}(e12: E11DU, powers: ZPowers11*) -> BigInt3 { - alloc_locals; - let (w0) = unrededucedUint256_to_BigInt3(e12.w0); - let (w1) = unrededucedUint256_to_BigInt3(e12.w1); - let (w2) = unrededucedUint256_to_BigInt3(e12.w2); - let (w3) = unrededucedUint256_to_BigInt3(e12.w3); - let (w4) = unrededucedUint256_to_BigInt3(e12.w4); - let (w5) = unrededucedUint256_to_BigInt3(e12.w5); - let (w6) = unrededucedUint256_to_BigInt3(e12.w6); - let (w7) = unrededucedUint256_to_BigInt3(e12.w7); - let (w8) = unrededucedUint256_to_BigInt3(e12.w8); - let (w9) = unrededucedUint256_to_BigInt3(e12.w9); - let (w10) = unrededucedUint256_to_BigInt3(e12.w10); - - let e0 = w0; - let (e1) = bigint_mul(w1, powers.z_1); - let (e2) = bigint_mul(w2, powers.z_2); - let (e3) = bigint_mul(w3, powers.z_3); - let (e4) = bigint_mul(w4, powers.z_4); - let (e5) = bigint_mul(w5, powers.z_5); - let (e6) = bigint_mul(w6, powers.z_6); - let (e7) = bigint_mul(w7, powers.z_7); - let (e8) = bigint_mul(w8, powers.z_8); - let (e9) = bigint_mul(w9, powers.z_9); - let (e10) = bigint_mul(w10, 
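The BigInt3 constant in eval_irreducible_poly12 above is -18 reduced mod the BN254 prime, written in the limb encoding used throughout this file. A small sketch of that encoding, assuming BASE = 2**86 and N_LIMBS = 3 for the imported curve constants:

BASE, N_LIMBS = 2**86, 3   # assumed values of the imported curve constants
p = 21888242871839275222246405745257275088696311157297823662689037894645226208583

def split(x, n_limbs=N_LIMBS, base=BASE):
    # integer -> little-endian limbs, same convention as the hints' split()
    return [(x // base**i) % base for i in range(n_limbs)]

def pack(limbs, base=BASE):
    return sum(d * base**i for i, d in enumerate(limbs))

minus_18 = split((-18) % p)
assert pack(minus_18) == p - 18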
powers.z_10); - let res = reduce_5( - UnreducedBigInt5( - d0=e0.d0 + e1.d0 + e2.d0 + e3.d0 + e4.d0 + e5.d0 + e6.d0 + e7.d0 + e8.d0 + e9.d0 + - e10.d0, - d1=e0.d1 + e1.d1 + e2.d1 + e3.d1 + e4.d1 + e5.d1 + e6.d1 + e7.d1 + e8.d1 + e9.d1 + - e10.d1, - d2=e0.d2 + e1.d2 + e2.d2 + e3.d2 + e4.d2 + e5.d2 + e6.d2 + e7.d2 + e8.d2 + e9.d2 + - e10.d2, - d3=e1.d3 + e2.d3 + e3.d3 + e4.d3 + e5.d3 + e6.d3 + e7.d3 + e8.d3 + e9.d3 + e10.d3, - d4=e1.d4 + e2.d4 + e3.d4 + e4.d4 + e5.d4 + e6.d4 + e7.d4 + e8.d4 + e9.d4 + e10.d4, - ), - ); - return res; -} - -func eval_E12_unreduced{range_check_ptr}(e12: E12DU, powers: ZPowers11*) -> UnreducedBigInt5 { - alloc_locals; - let w1 = reduce_3(e12.w1); - let w2 = reduce_3(e12.w2); - let w3 = reduce_3(e12.w3); - let w4 = reduce_3(e12.w4); - let w5 = reduce_3(e12.w5); - let w6 = reduce_3(e12.w6); - let w7 = reduce_3(e12.w7); - let w8 = reduce_3(e12.w8); - let w9 = reduce_3(e12.w9); - let w10 = reduce_3(e12.w10); - let w11 = reduce_3(e12.w11); - - let e0 = e12.w0; - let (e1) = bigint_mul(w1, powers.z_1); - let (e2) = bigint_mul(w2, powers.z_2); - let (e3) = bigint_mul(w3, powers.z_3); - let (e4) = bigint_mul(w4, powers.z_4); - let (e5) = bigint_mul(w5, powers.z_5); - let (e6) = bigint_mul(w6, powers.z_6); - let (e7) = bigint_mul(w7, powers.z_7); - let (e8) = bigint_mul(w8, powers.z_8); - let (e9) = bigint_mul(w9, powers.z_9); - let (e10) = bigint_mul(w10, powers.z_10); - let (e11) = bigint_mul(w11, powers.z_11); - let res = UnreducedBigInt5( - d0=e0.d0 + e1.d0 + e2.d0 + e3.d0 + e4.d0 + e5.d0 + e6.d0 + e7.d0 + e8.d0 + e9.d0 + e10.d0 + - e11.d0, - d1=e0.d1 + e1.d1 + e2.d1 + e3.d1 + e4.d1 + e5.d1 + e6.d1 + e7.d1 + e8.d1 + e9.d1 + e10.d1 + - e11.d1, - d2=e0.d2 + e1.d2 + e2.d2 + e3.d2 + e4.d2 + e5.d2 + e6.d2 + e7.d2 + e8.d2 + e9.d2 + e10.d2 + - e11.d2, - d3=e1.d3 + e2.d3 + e3.d3 + e4.d3 + e5.d3 + e6.d3 + e7.d3 + e8.d3 + e9.d3 + e10.d3 + e11.d3, - d4=e1.d4 + e2.d4 + e3.d4 + e4.d4 + e5.d4 + e6.d4 + e7.d4 + e8.d4 + e9.d4 + e10.d4 + e11.d4, - ); - return res; -} - -func get_powers_of_z11{range_check_ptr}(z: BigInt3) -> ZPowers11* { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - let z_2 = fq_bigint3.mul(z, z); - let z_3 = fq_bigint3.mul(z_2, z); - let z_4 = fq_bigint3.mul(z_3, z); - let z_5 = fq_bigint3.mul(z_4, z); - let z_6 = fq_bigint3.mul(z_5, z); - let z_7 = fq_bigint3.mul(z_6, z); - let z_8 = fq_bigint3.mul(z_7, z); - let z_9 = fq_bigint3.mul(z_8, z); - let z_10 = fq_bigint3.mul(z_9, z); - let z_11 = fq_bigint3.mul(z_10, z); - - local res: ZPowers11 = ZPowers11( - z_1=z, - z_2=z_2, - z_3=z_3, - z_4=z_4, - z_5=z_5, - z_6=z_6, - z_7=z_7, - z_8=z_8, - z_9=z_9, - z_10=z_10, - z_11=z_11, - ); - return &res; -} - -// Convert tower representations Fp12/Fp6/Fp2/Fp to Fp12/Fp -func gnark_to_w{range_check_ptr}(x: E12*) -> (res: E12D*) { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - local res: E12D = E12D( - w0=BigInt3( - x.c0.b0.a0.d0 - 9 * x.c0.b0.a1.d0, - x.c0.b0.a0.d1 - 9 * x.c0.b0.a1.d1, - x.c0.b0.a0.d2 - 9 * x.c0.b0.a1.d2, - ), - w1=BigInt3( - x.c1.b0.a0.d0 - 9 * x.c1.b0.a1.d0, - x.c1.b0.a0.d1 - 9 * x.c1.b0.a1.d1, - x.c1.b0.a0.d2 - 9 * x.c1.b0.a1.d2, - ), - w2=BigInt3( - x.c0.b1.a0.d0 - 9 * x.c0.b1.a1.d0, - x.c0.b1.a0.d1 - 9 * x.c0.b1.a1.d1, - x.c0.b1.a0.d2 - 9 * x.c0.b1.a1.d2, - ), - w3=BigInt3( - x.c1.b1.a0.d0 - 9 * x.c1.b1.a1.d0, - x.c1.b1.a0.d1 - 9 * x.c1.b1.a1.d1, - x.c1.b1.a0.d2 - 9 * x.c1.b1.a1.d2, - ), - w4=BigInt3( - x.c0.b2.a0.d0 - 9 * x.c0.b2.a1.d0, - x.c0.b2.a0.d1 - 9 * x.c0.b2.a1.d1, - x.c0.b2.a0.d2 - 9 * x.c0.b2.a1.d2, - ), - w5=BigInt3( - 
x.c1.b2.a0.d0 - 9 * x.c1.b2.a1.d0, - x.c1.b2.a0.d1 - 9 * x.c1.b2.a1.d1, - x.c1.b2.a0.d2 - 9 * x.c1.b2.a1.d2, - ), - w6=x.c0.b0.a1, - w7=x.c1.b0.a1, - w8=x.c0.b1.a1, - w9=x.c1.b1.a1, - w10=x.c0.b2.a1, - w11=x.c1.b2.a1, - ); - return (&res,); -} - -// E12_034{ -// C0: E6{B0: 1, B1: 0, B2: 0}, -// C1: E6{B0: c3, B1: c4, B2: 0}, -// } -// c3 <=> x.c1.b0 -// c4 <=> x.c1.b1 - -func gnark034_to_w{range_check_ptr}(c3: E2*, c4: E2*) -> (res: E12full034*) { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - local res: E12full034 = E12full034( - w1=BigInt3(c3.a0.d0 - 9 * c3.a1.d0, c3.a0.d1 - 9 * c3.a1.d1, c3.a0.d2 - 9 * c3.a1.d2), - w3=BigInt3(c4.a0.d0 - 9 * c4.a1.d0, c4.a0.d1 - 9 * c4.a1.d1, c4.a0.d2 - 9 * c4.a1.d2), - w7=c3.a1, - w9=c4.a1, - ); - return (&res,); -} -// Convert tower representation Fp12/Fp to Fp12/Fp6/Fp2/Fp -func w_to_gnark(x: E12D) -> E12* { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - // w^0 - let c0b0a0 = x.w0; - // w^1 - let c1b0a0 = x.w1; - // w^2 - let c0b1a0 = x.w2; - // w^3 - let c1b1a0 = x.w3; - // w^4 - let c0b2a0 = x.w4; - // w^5 - let c1b2a0 = x.w5; - // w^6 - local c0b0a1: BigInt3 = x.w6; - local c0b0a0: BigInt3 = BigInt3( - c0b0a0.d0 + 9 * c0b0a1.d0, c0b0a0.d1 + 9 * c0b0a1.d1, c0b0a0.d2 + 9 * c0b0a1.d2 - ); - // w^7 - local c1b0a1: BigInt3 = x.w7; - local c1b0a0: BigInt3 = BigInt3( - c1b0a0.d0 + 9 * c1b0a1.d0, c1b0a0.d1 + 9 * c1b0a1.d1, c1b0a0.d2 + 9 * c1b0a1.d2 - ); - // w^8 - local c0b1a1: BigInt3 = x.w8; - local c0b1a0: BigInt3 = BigInt3( - c0b1a0.d0 + 9 * c0b1a1.d0, c0b1a0.d1 + 9 * c0b1a1.d1, c0b1a0.d2 + 9 * c0b1a1.d2 - ); - // w^9 - local c1b1a1: BigInt3 = x.w9; - local c1b1a0: BigInt3 = BigInt3( - c1b1a0.d0 + 9 * c1b1a1.d0, c1b1a0.d1 + 9 * c1b1a1.d1, c1b1a0.d2 + 9 * c1b1a1.d2 - ); - // w^10 - local c0b2a1: BigInt3 = x.w10; - local c0b2a0: BigInt3 = BigInt3( - c0b2a0.d0 + 9 * c0b2a1.d0, c0b2a0.d1 + 9 * c0b2a1.d1, c0b2a0.d2 + 9 * c0b2a1.d2 - ); - // w^11 - local c1b2a1: BigInt3 = x.w11; - local c1b2a0: BigInt3 = BigInt3( - c1b2a0.d0 + 9 * c1b2a1.d0, c1b2a0.d1 + 9 * c1b2a1.d1, c1b2a0.d2 + 9 * c1b2a1.d2 - ); - - local c0b0: E2 = E2(c0b0a0, c0b0a1); - local c0b1: E2 = E2(c0b1a0, c0b1a1); - local c0b2: E2 = E2(c0b2a0, c0b2a1); - local c1b0: E2 = E2(c1b0a0, c1b0a1); - local c1b1: E2 = E2(c1b1a0, c1b1a1); - local c1b2: E2 = E2(c1b2a0, c1b2a1); - local c0: E6 = E6(&c0b0, &c0b1, &c0b2); - local c1: E6 = E6(&c1b0, &c1b1, &c1b2); - local res: E12 = E12(&c0, &c1); - return &res; -} - -func w_to_gnark_reduced{range_check_ptr}(x: E12D) -> E12* { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - // w^0 - // let c0b0a0 = x.w0; - // // w^1 - // let c1b0a0 = x.w1; - // // w^2 - // let c0b1a0 = x.w2; - // // w^3 - // let c1b1a0 = x.w3; - // // w^4 - // let c0b2a0 = x.w4; - // // w^5 - // let c1b2a0 = x.w5; - // w^6 - local c0b0a1: BigInt3 = x.w6; - let c0b0a0 = reduce_3( - UnreducedBigInt3(x.w0.d0 + 9 * c0b0a1.d0, x.w0.d1 + 9 * c0b0a1.d1, x.w0.d2 + 9 * c0b0a1.d2) - ); - // w^7 - local c1b0a1: BigInt3 = x.w7; - let c1b0a0 = reduce_3( - UnreducedBigInt3(x.w1.d0 + 9 * c1b0a1.d0, x.w1.d1 + 9 * c1b0a1.d1, x.w1.d2 + 9 * c1b0a1.d2) - ); - // w^8 - local c0b1a1: BigInt3 = x.w8; - let c0b1a0 = reduce_3( - UnreducedBigInt3(x.w2.d0 + 9 * c0b1a1.d0, x.w2.d1 + 9 * c0b1a1.d1, x.w2.d2 + 9 * c0b1a1.d2) - ); - // w^9 - local c1b1a1: BigInt3 = x.w9; - let c1b1a0 = reduce_3( - UnreducedBigInt3(x.w3.d0 + 9 * c1b1a1.d0, x.w3.d1 + 9 * c1b1a1.d1, x.w3.d2 + 9 * c1b1a1.d2) - ); - // w^10 - local c0b2a1: BigInt3 = x.w10; - let c0b2a0 = reduce_3( - UnreducedBigInt3(x.w4.d0 + 9 * c0b2a1.d0, 
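gnark_to_w and w_to_gnark above are a linear change of basis between the tower view of Fp12 (each E2 coordinate a0 + a1*u, with u^2 = -1 and non-residue 9 + u in the standard BN254 tower, which is an assumption about the tower rather than something stated in this file) and the direct degree-11 polynomial view in w. A round-trip sketch of that mapping; the flat index layout mirrors the struct fields in the deleted code:

import random

p = 21888242871839275222246405745257275088696311157297823662689037894645226208583

def gnark_to_w(tower):
    # tower[i][j] = (a0, a1) for c_i.b_j; direct coefficients:
    # w_{2j+i} = a0 - 9*a1 and w_{2j+i+6} = a1
    w = [0] * 12
    for i in range(2):          # c0, c1
        for j in range(3):      # b0, b1, b2
            a0, a1 = tower[i][j]
            w[2 * j + i] = (a0 - 9 * a1) % p
            w[2 * j + i + 6] = a1 % p
    return w

def w_to_gnark(w):
    # inverse map: a0 = w_{2j+i} + 9 * w_{2j+i+6}, a1 = w_{2j+i+6}
    tower = [[None] * 3 for _ in range(2)]
    for i in range(2):
        for j in range(3):
            a1 = w[2 * j + i + 6]
            tower[i][j] = ((w[2 * j + i] + 9 * a1) % p, a1)
    return tower

t = [[(random.randrange(p), random.randrange(p)) for _ in range(3)] for _ in range(2)]
assert w_to_gnark(gnark_to_w(t)) == t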
x.w4.d1 + 9 * c0b2a1.d1, x.w4.d2 + 9 * c0b2a1.d2) - ); - // w^11 - local c1b2a1: BigInt3 = x.w11; - let c1b2a0 = reduce_3( - UnreducedBigInt3(x.w5.d0 + 9 * c1b2a1.d0, x.w5.d1 + 9 * c1b2a1.d1, x.w5.d2 + 9 * c1b2a1.d2) - ); - - local c0b0: E2 = E2(c0b0a0, c0b0a1); - local c0b1: E2 = E2(c0b1a0, c0b1a1); - local c0b2: E2 = E2(c0b2a0, c0b2a1); - local c1b0: E2 = E2(c1b0a0, c1b0a1); - local c1b1: E2 = E2(c1b1a0, c1b1a1); - local c1b2: E2 = E2(c1b2a0, c1b2a1); - local c0: E6 = E6(&c0b0, &c0b1, &c0b2); - local c1: E6 = E6(&c1b0, &c1b1, &c1b2); - local res: E12 = E12(&c0, &c1); - return &res; -} - -func assert_reduced_E12D{range_check_ptr}(x: E12D) { - assert [range_check_ptr] = x.w0.d0; - assert [range_check_ptr + 1] = x.w0.d1; - assert [range_check_ptr + 2] = x.w0.d2; - assert [range_check_ptr + 3] = BASE_MIN_1 - x.w0.d0; - assert [range_check_ptr + 4] = BASE_MIN_1 - x.w0.d1; - assert [range_check_ptr + 5] = P2 - x.w0.d2; - assert [range_check_ptr + 6] = x.w1.d0; - assert [range_check_ptr + 7] = x.w1.d1; - assert [range_check_ptr + 8] = x.w1.d2; - assert [range_check_ptr + 9] = BASE_MIN_1 - x.w1.d0; - assert [range_check_ptr + 10] = BASE_MIN_1 - x.w1.d1; - assert [range_check_ptr + 11] = P2 - x.w1.d2; - assert [range_check_ptr + 12] = x.w2.d0; - assert [range_check_ptr + 13] = x.w2.d1; - assert [range_check_ptr + 14] = x.w2.d2; - assert [range_check_ptr + 15] = BASE_MIN_1 - x.w2.d0; - assert [range_check_ptr + 16] = BASE_MIN_1 - x.w2.d1; - assert [range_check_ptr + 17] = P2 - x.w2.d2; - assert [range_check_ptr + 18] = x.w3.d0; - assert [range_check_ptr + 19] = x.w3.d1; - assert [range_check_ptr + 20] = x.w3.d2; - assert [range_check_ptr + 21] = BASE_MIN_1 - x.w3.d0; - assert [range_check_ptr + 22] = BASE_MIN_1 - x.w3.d1; - assert [range_check_ptr + 23] = P2 - x.w3.d2; - assert [range_check_ptr + 24] = x.w4.d0; - assert [range_check_ptr + 25] = x.w4.d1; - assert [range_check_ptr + 26] = x.w4.d2; - assert [range_check_ptr + 27] = BASE_MIN_1 - x.w4.d0; - assert [range_check_ptr + 28] = BASE_MIN_1 - x.w4.d1; - assert [range_check_ptr + 29] = P2 - x.w4.d2; - assert [range_check_ptr + 30] = x.w5.d0; - assert [range_check_ptr + 31] = x.w5.d1; - assert [range_check_ptr + 32] = x.w5.d2; - assert [range_check_ptr + 33] = BASE_MIN_1 - x.w5.d0; - assert [range_check_ptr + 34] = BASE_MIN_1 - x.w5.d1; - assert [range_check_ptr + 35] = P2 - x.w5.d2; - assert [range_check_ptr + 36] = x.w6.d0; - assert [range_check_ptr + 37] = x.w6.d1; - assert [range_check_ptr + 38] = x.w6.d2; - assert [range_check_ptr + 39] = BASE_MIN_1 - x.w6.d0; - assert [range_check_ptr + 40] = BASE_MIN_1 - x.w6.d1; - assert [range_check_ptr + 41] = P2 - x.w6.d2; - assert [range_check_ptr + 42] = x.w7.d0; - assert [range_check_ptr + 43] = x.w7.d1; - assert [range_check_ptr + 44] = x.w7.d2; - assert [range_check_ptr + 45] = BASE_MIN_1 - x.w7.d0; - assert [range_check_ptr + 46] = BASE_MIN_1 - x.w7.d1; - assert [range_check_ptr + 47] = P2 - x.w7.d2; - assert [range_check_ptr + 48] = x.w8.d0; - assert [range_check_ptr + 49] = x.w8.d1; - assert [range_check_ptr + 50] = x.w8.d2; - assert [range_check_ptr + 51] = BASE_MIN_1 - x.w8.d0; - assert [range_check_ptr + 52] = BASE_MIN_1 - x.w8.d1; - assert [range_check_ptr + 53] = P2 - x.w8.d2; - assert [range_check_ptr + 54] = x.w9.d0; - assert [range_check_ptr + 55] = x.w9.d1; - assert [range_check_ptr + 56] = x.w9.d2; - assert [range_check_ptr + 57] = BASE_MIN_1 - x.w9.d0; - assert [range_check_ptr + 58] = BASE_MIN_1 - x.w9.d1; - assert [range_check_ptr + 59] = P2 - x.w9.d2; - assert [range_check_ptr + 
60] = x.w10.d0; - assert [range_check_ptr + 61] = x.w10.d1; - assert [range_check_ptr + 62] = x.w10.d2; - assert [range_check_ptr + 63] = BASE_MIN_1 - x.w10.d0; - assert [range_check_ptr + 64] = BASE_MIN_1 - x.w10.d1; - assert [range_check_ptr + 65] = P2 - x.w10.d2; - assert [range_check_ptr + 66] = x.w11.d0; - assert [range_check_ptr + 67] = x.w11.d1; - assert [range_check_ptr + 68] = x.w11.d2; - assert [range_check_ptr + 69] = BASE_MIN_1 - x.w11.d0; - assert [range_check_ptr + 70] = BASE_MIN_1 - x.w11.d1; - assert [range_check_ptr + 71] = P2 - x.w11.d2; - - if (x.w0.d2 == P2) { - if (x.w0.d1 == P1) { - assert [range_check_ptr + 72] = P0 - 1 - x.w0.d0; - tempvar range_check_ptr = range_check_ptr + 73; - } else { - assert [range_check_ptr + 72] = P1 - 1 - x.w0.d1; - tempvar range_check_ptr = range_check_ptr + 73; - } - } else { - tempvar range_check_ptr = range_check_ptr + 72; - } - - if (x.w1.d2 == P2) { - if (x.w1.d1 == P1) { - assert [range_check_ptr] = P0 - 1 - x.w1.d0; - tempvar range_check_ptr = range_check_ptr + 1; - } else { - assert [range_check_ptr] = P1 - 1 - x.w1.d1; - tempvar range_check_ptr = range_check_ptr + 1; - } - } else { - tempvar range_check_ptr = range_check_ptr; - } - - if (x.w2.d2 == P2) { - if (x.w2.d1 == P1) { - assert [range_check_ptr] = P0 - 1 - x.w2.d0; - tempvar range_check_ptr = range_check_ptr + 1; - } else { - assert [range_check_ptr] = P1 - 1 - x.w2.d1; - tempvar range_check_ptr = range_check_ptr + 1; - } - } else { - tempvar range_check_ptr = range_check_ptr; - } - - if (x.w3.d2 == P2) { - if (x.w3.d1 == P1) { - assert [range_check_ptr] = P0 - 1 - x.w3.d0; - tempvar range_check_ptr = range_check_ptr + 1; - } else { - assert [range_check_ptr] = P1 - 1 - x.w3.d1; - tempvar range_check_ptr = range_check_ptr + 1; - } - } else { - tempvar range_check_ptr = range_check_ptr; - } - - if (x.w4.d2 == P2) { - if (x.w4.d1 == P1) { - assert [range_check_ptr] = P0 - 1 - x.w4.d0; - tempvar range_check_ptr = range_check_ptr + 1; - } else { - assert [range_check_ptr] = P1 - 1 - x.w4.d1; - tempvar range_check_ptr = range_check_ptr + 1; - } - } else { - tempvar range_check_ptr = range_check_ptr; - } - - if (x.w5.d2 == P2) { - if (x.w5.d1 == P1) { - assert [range_check_ptr] = P0 - 1 - x.w5.d0; - tempvar range_check_ptr = range_check_ptr + 1; - } else { - assert [range_check_ptr] = P1 - 1 - x.w5.d1; - tempvar range_check_ptr = range_check_ptr + 1; - } - } else { - tempvar range_check_ptr = range_check_ptr; - } - - if (x.w6.d2 == P2) { - if (x.w6.d1 == P1) { - assert [range_check_ptr] = P0 - 1 - x.w6.d0; - tempvar range_check_ptr = range_check_ptr + 1; - } else { - assert [range_check_ptr] = P1 - 1 - x.w6.d1; - tempvar range_check_ptr = range_check_ptr + 1; - } - } else { - tempvar range_check_ptr = range_check_ptr; - } - - if (x.w7.d2 == P2) { - if (x.w7.d1 == P1) { - assert [range_check_ptr] = P0 - 1 - x.w7.d0; - tempvar range_check_ptr = range_check_ptr + 1; - } else { - assert [range_check_ptr] = P1 - 1 - x.w7.d1; - tempvar range_check_ptr = range_check_ptr + 1; - } - } else { - tempvar range_check_ptr = range_check_ptr; - } - - if (x.w8.d2 == P2) { - if (x.w8.d1 == P1) { - assert [range_check_ptr] = P0 - 1 - x.w8.d0; - tempvar range_check_ptr = range_check_ptr + 1; - } else { - assert [range_check_ptr] = P1 - 1 - x.w8.d1; - tempvar range_check_ptr = range_check_ptr + 1; - } - } else { - tempvar range_check_ptr = range_check_ptr; - } - - if (x.w9.d2 == P2) { - if (x.w9.d1 == P1) { - assert [range_check_ptr] = P0 - 1 - x.w9.d0; - tempvar range_check_ptr = range_check_ptr + 
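The long run of range checks above, together with the case splits that continue below, enforces limb by limb that each BigInt3 coordinate is a canonical representative in [0, p): every limb is at most BASE - 1, the high limb is at most P2, and ties on the higher limbs fall through to the next limb. A plain-integer model of that comparison, assuming BASE = 2**86:

import random

BASE = 2**86
p = 21888242871839275222246405745257275088696311157297823662689037894645226208583
P0, P1, P2 = [(p // BASE**i) % BASE for i in range(3)]   # the three limbs of p (P0, P1, P2 in the Cairo code)

def is_reduced(d0, d1, d2):
    # mirrors what assert_reduced_E12D enforces for one BigInt3 (d0, d1, d2)
    if not (0 <= d0 < BASE and 0 <= d1 < BASE and 0 <= d2 <= P2):
        return False
    if d2 < P2:
        return True
    if d1 != P1:
        return d1 < P1   # the "else" branch: P1 - 1 - d1 must be non-negative
    return d0 < P0       # d2 == P2 and d1 == P1: P0 - 1 - d0 must be non-negative

for _ in range(1000):
    v = random.randrange(2 * p)
    limbs = [(v // BASE**i) % BASE for i in range(3)]
    assert is_reduced(*limbs) == (v < p)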
1; - } else { - assert [range_check_ptr] = P1 - 1 - x.w9.d1; - tempvar range_check_ptr = range_check_ptr + 1; - } - } else { - tempvar range_check_ptr = range_check_ptr; - } - - if (x.w10.d2 == P2) { - if (x.w10.d1 == P1) { - assert [range_check_ptr] = P0 - 1 - x.w10.d0; - tempvar range_check_ptr = range_check_ptr + 1; - } else { - assert [range_check_ptr] = P1 - 1 - x.w10.d1; - tempvar range_check_ptr = range_check_ptr + 1; - } - } else { - tempvar range_check_ptr = range_check_ptr; - } - - if (x.w11.d2 == P2) { - if (x.w11.d1 == P1) { - assert [range_check_ptr] = P0 - 1 - x.w11.d0; - tempvar range_check_ptr = range_check_ptr + 1; - return (); - } else { - assert [range_check_ptr] = P1 - 1 - x.w11.d1; - tempvar range_check_ptr = range_check_ptr + 1; - return (); - } - } else { - tempvar range_check_ptr = range_check_ptr; - return (); - } -} - -func assert_E12D(x: E12D*, y: E12D*) { - assert x.w0.d0 = y.w0.d0; - assert x.w0.d1 = y.w0.d1; - assert x.w0.d2 = y.w0.d2; - assert x.w1.d0 = y.w1.d0; - assert x.w1.d1 = y.w1.d1; - assert x.w1.d2 = y.w1.d2; - assert x.w2.d0 = y.w2.d0; - assert x.w2.d1 = y.w2.d1; - assert x.w2.d2 = y.w2.d2; - assert x.w3.d0 = y.w3.d0; - assert x.w3.d1 = y.w3.d1; - assert x.w3.d2 = y.w3.d2; - assert x.w4.d0 = y.w4.d0; - assert x.w4.d1 = y.w4.d1; - assert x.w4.d2 = y.w4.d2; - assert x.w5.d0 = y.w5.d0; - assert x.w5.d1 = y.w5.d1; - assert x.w5.d2 = y.w5.d2; - assert x.w6.d0 = y.w6.d0; - assert x.w6.d1 = y.w6.d1; - assert x.w6.d2 = y.w6.d2; - assert x.w7.d0 = y.w7.d0; - assert x.w7.d1 = y.w7.d1; - assert x.w7.d2 = y.w7.d2; - assert x.w8.d0 = y.w8.d0; - assert x.w8.d1 = y.w8.d1; - assert x.w8.d2 = y.w8.d2; - assert x.w9.d0 = y.w9.d0; - assert x.w9.d1 = y.w9.d1; - assert x.w9.d2 = y.w9.d2; - assert x.w10.d0 = y.w10.d0; - assert x.w10.d1 = y.w10.d1; - assert x.w10.d2 = y.w10.d2; - assert x.w11.d0 = y.w11.d0; - assert x.w11.d1 = y.w11.d1; - assert x.w11.d2 = y.w11.d2; - return (); -} diff --git a/archive_tmp/bn254/towers/e2.cairo b/archive_tmp/bn254/towers/e2.cairo deleted file mode 100644 index fa12badc..00000000 --- a/archive_tmp/bn254/towers/e2.cairo +++ /dev/null @@ -1,764 +0,0 @@ -from src.bn254.fq import ( - fq_bigint3, - BigInt3, - fq_eq_zero, - UnreducedBigInt5, - UnreducedBigInt3, - bigint_mul, - reduce_5, - reduce_3, - assert_reduced_felt, -) -from starkware.cairo.common.registers import get_fp_and_pc -from src.bn254.curve import N_LIMBS, DEGREE, BASE, P0, P1, P2, NON_RESIDUE_E2_a0, NON_RESIDUE_E2_a1 - -struct E2 { - a0: BigInt3, - a1: BigInt3, -} - -namespace e2 { - func zero{}() -> E2* { - tempvar zero: E2* = new E2(BigInt3(0, 0, 0), BigInt3(0, 0, 0)); - return zero; - } - func one{}() -> E2* { - tempvar one = new E2(BigInt3(1, 0, 0), BigInt3(0, 0, 0)); - return one; - } - func is_zero{}(x: E2*) -> felt { - let a0_is_zero = fq_eq_zero(x.a0); - if (a0_is_zero == 0) { - return 0; - } - - let a1_is_zero = fq_eq_zero(x.a1); - return a1_is_zero; - } - func conjugate{range_check_ptr}(x: E2*) -> E2* { - alloc_locals; - let a1 = fq_bigint3.neg(x.a1); - tempvar res: E2* = new E2(x.a0, a1); - return res; - } - func add{range_check_ptr}(x: E2*, y: E2*) -> E2* { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - let a0 = fq_bigint3.add(x.a0, y.a0); - let a1 = fq_bigint3.add(x.a1, y.a1); - local res: E2 = E2(a0, a1); - return &res; - } - - func double{range_check_ptr}(x: E2*) -> E2* { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - let a0 = fq_bigint3.add(x.a0, x.a0); - let a1 = fq_bigint3.add(x.a1, x.a1); - local res: E2 = E2(a0, a1); - return &res; - } - func 
neg{range_check_ptr}(x: E2*) -> E2* { - let zero_2 = e2.zero(); - let res = sub(zero_2, x); - return res; - } - func sub{range_check_ptr}(x: E2*, y: E2*) -> E2* { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - let a0 = fq_bigint3.sub(x.a0, y.a0); - let a1 = fq_bigint3.sub(x.a1, y.a1); - local res: E2 = E2(a0, a1); - return &res; - } - - func inv{range_check_ptr}(x: E2*) -> E2* { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - local inv0: BigInt3; - local inv1: BigInt3; - %{ - from src.hints.fq import bigint_pack, bigint_fill, get_p - from src.hints.e2 import E2 - p = get_p(ids) - a0 = bigint_pack(ids.x.a0, ids.N_LIMBS, ids.BASE) - a1 = bigint_pack(ids.x.a1, ids.N_LIMBS, ids.BASE) - x = E2(a0, a1, p) - x_inv = 1/x - bigint_fill(x_inv.a0,ids.inv0, ids.N_LIMBS, ids.BASE) - bigint_fill(x_inv.a1,ids.inv1, ids.N_LIMBS, ids.BASE) - %} - local inverse: E2 = E2(&inv0, &inv1); - - let check = e2.mul(x, &inverse); - let one = e2.one(); - let check = e2.sub(check, one); - let check_is_zero: felt = e2.is_zero(check); - assert check_is_zero = 1; - return &inverse; - } - - func div{range_check_ptr}(x: E2*, y: E2*) -> E2* { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - local div0: BigInt3; - local div1: BigInt3; - %{ - from starkware.cairo.common.math_utils import as_int - from src.hints.fq import bigint_split - - assert 1 < ids.N_LIMBS <= 12 - assert ids.DEGREE == ids.N_LIMBS-1 - p,x,y=0, 2*[0], 2*[0] - x_refs = [ids.x.a0, ids.x.a1] - y_refs = [ids.y.a0, ids.y.a1] - - for i in range(ids.N_LIMBS): - for k in range(2): - x[k]+=as_int(getattr(x_refs[k], 'd'+str(i)), PRIME) * ids.BASE**i - y[k]+=as_int(getattr(y_refs[k], 'd'+str(i)), PRIME) * ids.BASE**i - p+=getattr(ids, 'P'+str(i)) * ids.BASE**i - - def inv_e2(a:(int, int)): - t0, t1 = (a[0] * a[0] % p, a[1] * a[1] % p) - t0 = (t0 + t1) % p - t1 = pow(t0, -1, p) - return a[0] * t1 % p, -(a[1] * t1) % p - def mul_e2(x:(int,int), y:(int,int)): - a = (x[0] + x[1]) * (y[0] + y[1]) % p - b, c = x[0]*y[0] % p, x[1]*y[1] % p - return (b - c) % p, (a - b - c) % p - - x=(x[0], x[1]) - y=(y[0], y[1]) - y_inv = inv_e2(y) - div = mul_e2(x, y_inv) - div0, div1 = split(div[0]), split(div[1]) - for i in range(ids.N_LIMBS): - setattr(ids.div0, 'd'+str(i), div0[i]) - setattr(ids.div1, 'd'+str(i), div1[i]) - %} - assert_reduced_felt(div0); - assert_reduced_felt(div1); - - local div: E2 = E2(div0, div1); - let check = e2.mul(y, &div); - assert_E2(x, check); - - return ÷ - } - - // Computes (mul_left * mul_right) - sub_right - func mul_sub{range_check_ptr}(mul_left: E2*, mul_right: E2*, sub_right: E2*) -> E2* { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - - // Mul mul_left and mul_right - let (a) = bigint_mul( - BigInt3( - mul_left.a0.d0 + mul_left.a1.d0, - mul_left.a0.d1 + mul_left.a1.d1, - mul_left.a0.d2 + mul_left.a1.d2, - ), - BigInt3( - mul_right.a0.d0 + mul_right.a1.d0, - mul_right.a0.d1 + mul_right.a1.d1, - mul_right.a0.d2 + mul_right.a1.d2, - ), - ); - let (b) = bigint_mul(mul_left.a0, mul_right.a0); - let (c) = bigint_mul(mul_left.a1, mul_right.a1); - - let res_a0 = reduce_5( - UnreducedBigInt5( - d0=b.d0 - c.d0 - sub_right.a0.d0, - d1=b.d1 - c.d1 - sub_right.a0.d1, - d2=b.d2 - c.d2 - sub_right.a0.d2, - d3=b.d3 - c.d3, - d4=b.d4 - c.d4, - ), - ); - - let res_a1 = reduce_5( - UnreducedBigInt5( - d0=a.d0 - b.d0 - c.d0 - sub_right.a1.d0, - d1=a.d1 - b.d1 - c.d1 - sub_right.a1.d1, - d2=a.d2 - b.d2 - c.d2 - sub_right.a1.d2, - d3=a.d3 - b.d3 - c.d3, - d4=a.d4 - b.d4 - c.d4, - ), - ); - - // End : - local res: E2 = E2(res_a0, res_a1); - 
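inv and div above follow the usual hint-and-verify pattern: a Python hint computes the candidate result out of circuit, and the Cairo code only proves it correct by multiplying back (x * x_inv == 1, y * (x/y) == x); div_full for E12 earlier in this diff does the same with mul_trick_pure. A sketch of the underlying Fp2 arithmetic, matching the formulas in the div hint (u^2 = -1, tuples are (a0, a1)):

import random

p = 21888242871839275222246405745257275088696311157297823662689037894645226208583

def e2_mul(x, y):
    a = (x[0] + x[1]) * (y[0] + y[1]) % p      # Karatsuba-style, as in e2.mul
    b, c = x[0] * y[0] % p, x[1] * y[1] % p
    return (b - c) % p, (a - b - c) % p

def e2_inv(a):
    t = pow(a[0] * a[0] + a[1] * a[1], -1, p)  # 1 / (a0^2 + a1^2)
    return a[0] * t % p, (-a[1] * t) % p

def e2_div(x, y):
    q = e2_mul(x, e2_inv(y))                   # what the hint fills into ids.div
    assert e2_mul(y, q) == x                   # what assert_E2(x, check) enforces
    return q

x = (random.randrange(p), random.randrange(p))
y = (random.randrange(1, p), random.randrange(p))   # nonzero denominator
e2_div(x, y)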
return &res; - } - // Computes mul_left * (sub0_left - sub0_right) - sub1_right - func mul_sub0_sub1{range_check_ptr}( - mul_left: E2*, sub0_left: E2*, sub0_right: E2*, sub1_right: E2* - ) -> E2* { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - - tempvar mul_right_a0 = BigInt3( - sub0_left.a0.d0 - sub0_right.a0.d0, - sub0_left.a0.d1 - sub0_right.a0.d1, - sub0_left.a0.d2 - sub0_right.a0.d2, - ); - - tempvar mul_right_a1 = BigInt3( - sub0_left.a1.d0 - sub0_right.a1.d0, - sub0_left.a1.d1 - sub0_right.a1.d1, - sub0_left.a1.d2 - sub0_right.a1.d2, - ); - - // Mul mul_left and mul_right - - let (a) = bigint_mul( - BigInt3( - mul_left.a0.d0 + mul_left.a1.d0, - mul_left.a0.d1 + mul_left.a1.d1, - mul_left.a0.d2 + mul_left.a1.d2, - ), - BigInt3( - mul_right_a0.d0 + mul_right_a1.d0, - mul_right_a0.d1 + mul_right_a1.d1, - mul_right_a0.d2 + mul_right_a1.d2, - ), - ); - - let (b) = bigint_mul(mul_left.a0, mul_right_a0); - let (c) = bigint_mul(mul_left.a1, mul_right_a1); - - let res_a0 = reduce_5( - UnreducedBigInt5( - d0=b.d0 - c.d0 - sub1_right.a0.d0, - d1=b.d1 - c.d1 - sub1_right.a0.d1, - d2=b.d2 - c.d2 - sub1_right.a0.d2, - d3=b.d3 - c.d3, - d4=b.d4 - c.d4, - ), - ); - - let res_a1 = reduce_5( - UnreducedBigInt5( - d0=a.d0 - b.d0 - c.d0 - sub1_right.a1.d0, - d1=a.d1 - b.d1 - c.d1 - sub1_right.a1.d1, - d2=a.d2 - b.d2 - c.d2 - sub1_right.a1.d2, - d3=a.d3 - b.d3 - c.d3, - d4=a.d4 - b.d4 - c.d4, - ), - ); - - // End : - local res: E2 = E2(res_a0, res_a1); - return &res; - } - - // Computes sub_left - mul_left * mul_right - func sub_mul{range_check_ptr}(sub_left: E2*, mul_left: E2*, mul_right: E2*) -> E2* { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - - // Mul mul_left and mul_right - let (a) = bigint_mul( - BigInt3( - mul_left.a0.d0 + mul_left.a1.d0, - mul_left.a0.d1 + mul_left.a1.d1, - mul_left.a0.d2 + mul_left.a1.d2, - ), - BigInt3( - mul_right.a0.d0 + mul_right.a1.d0, - mul_right.a0.d1 + mul_right.a1.d1, - mul_right.a0.d2 + mul_right.a1.d2, - ), - ); - let (b) = bigint_mul(mul_left.a0, mul_right.a0); - let (c) = bigint_mul(mul_left.a1, mul_right.a1); - - let res_a0 = reduce_5( - UnreducedBigInt5( - d0=sub_left.a0.d0 - (b.d0 - c.d0), - d1=sub_left.a0.d1 - (b.d1 - c.d1), - d2=sub_left.a0.d2 - (b.d2 - c.d2), - d3=-(b.d3 - c.d3), - d4=-(b.d4 - c.d4), - ), - ); - - let res_a1 = reduce_5( - UnreducedBigInt5( - d0=sub_left.a1.d0 - (a.d0 - b.d0 - c.d0), - d1=sub_left.a1.d1 - (a.d1 - b.d1 - c.d1), - d2=sub_left.a1.d2 - (a.d2 - b.d2 - c.d2), - d3=-(a.d3 - b.d3 - c.d3), - d4=-(a.d4 - b.d4 - c.d4), - ), - ); - - // End : - local res: E2 = E2(res_a0, res_a1); - return &res; - } - - // Computes (to_square * to_square) - (to_double + to_double) - func square_min_double{range_check_ptr}(to_square: E2*, to_double: E2*) -> E2* { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - let (a0_unreduced) = bigint_mul( - BigInt3( - to_square.a0.d0 + to_square.a1.d0, - to_square.a0.d1 + to_square.a1.d1, - to_square.a0.d2 + to_square.a1.d2, - ), - BigInt3( - to_square.a0.d0 - to_square.a1.d0, - to_square.a0.d1 - to_square.a1.d1, - to_square.a0.d2 - to_square.a1.d2, - ), - ); - - let a0 = reduce_5( - UnreducedBigInt5( - d0=a0_unreduced.d0 - 2 * to_double.a0.d0, - d1=a0_unreduced.d1 - 2 * to_double.a0.d1, - d2=a0_unreduced.d2 - 2 * to_double.a0.d2, - d3=a0_unreduced.d3, - d4=a0_unreduced.d4, - ), - ); - - let (a1_unreduced) = bigint_mul(to_square.a0, to_square.a1); - let a1 = reduce_5( - UnreducedBigInt5( - d0=a1_unreduced.d0 + a1_unreduced.d0 - 2 * to_double.a1.d0, - d1=a1_unreduced.d1 + a1_unreduced.d1 - 2 * 
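The fused helpers around this point (mul_sub, mul_sub0_sub1, sub_mul, square_min_double, square_min_add) all fold an extra addition or subtraction into the unreduced limbs of a Karatsuba-style Fp2 product, so only one reduce_5 per output coordinate is paid. A plain-integer model of mul_sub under the same u^2 = -1 arithmetic:

import random

p = 21888242871839275222246405745257275088696311157297823662689037894645226208583

def e2_mul_sub(x, y, s):
    # (x * y) - s over Fp2; x, y, s are (a0, a1) pairs
    a = (x[0] + x[1]) * (y[0] + y[1])
    b = x[0] * y[0]
    c = x[1] * y[1]
    # subtraction folded into the unreduced values, one reduction per coordinate
    return (b - c - s[0]) % p, (a - b - c - s[1]) % p

def e2_mul(x, y):
    return (x[0] * y[0] - x[1] * y[1]) % p, (x[0] * y[1] + x[1] * y[0]) % p

x, y, s = [(random.randrange(p), random.randrange(p)) for _ in range(3)]
ref = e2_mul(x, y)
assert e2_mul_sub(x, y, s) == ((ref[0] - s[0]) % p, (ref[1] - s[1]) % p)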
to_double.a1.d1, - d2=a1_unreduced.d2 + a1_unreduced.d2 - 2 * to_double.a1.d2, - d3=a1_unreduced.d3 + a1_unreduced.d3, - d4=a1_unreduced.d4 + a1_unreduced.d4, - ), - ); - - // End : - local res: E2 = E2(a0, a1); - return &res; - } - - // Computes to_square * to_square - (add_left + add_right) - func square_min_add{range_check_ptr}(to_square: E2*, add_left: E2*, add_right: E2*) -> E2* { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - let (a0_unreduced) = bigint_mul( - BigInt3( - to_square.a0.d0 + to_square.a1.d0, - to_square.a0.d1 + to_square.a1.d1, - to_square.a0.d2 + to_square.a1.d2, - ), - BigInt3( - to_square.a0.d0 - to_square.a1.d0, - to_square.a0.d1 - to_square.a1.d1, - to_square.a0.d2 - to_square.a1.d2, - ), - ); - - let a0 = reduce_5( - UnreducedBigInt5( - d0=a0_unreduced.d0 - (add_left.a0.d0 + add_right.a0.d0), - d1=a0_unreduced.d1 - (add_left.a0.d1 + add_right.a0.d1), - d2=a0_unreduced.d2 - (add_left.a0.d2 + add_right.a0.d2), - d3=a0_unreduced.d3, - d4=a0_unreduced.d4, - ), - ); - - let (a1_unreduced) = bigint_mul(to_square.a0, to_square.a1); - let a1 = reduce_5( - UnreducedBigInt5( - d0=a1_unreduced.d0 + a1_unreduced.d0 - (add_left.a1.d0 + add_right.a1.d0), - d1=a1_unreduced.d1 + a1_unreduced.d1 - (add_left.a1.d1 + add_right.a1.d1), - d2=a1_unreduced.d2 + a1_unreduced.d2 - (add_left.a1.d2 + add_right.a1.d2), - d3=a1_unreduced.d3 + a1_unreduced.d3, - d4=a1_unreduced.d4 + a1_unreduced.d4, - ), - ); - - // End : - local res: E2 = E2(a0, a1); - return &res; - } - - func mul{range_check_ptr}(x: E2*, y: E2*) -> E2* { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - - let (a) = bigint_mul( - BigInt3(x.a0.d0 + x.a1.d0, x.a0.d1 + x.a1.d1, x.a0.d2 + x.a1.d2), - BigInt3(y.a0.d0 + y.a1.d0, y.a0.d1 + y.a1.d1, y.a0.d2 + y.a1.d2), - ); - let (b) = bigint_mul(x.a0, y.a0); - let (c) = bigint_mul(x.a1, y.a1); - - let z_a1_red = reduce_5( - UnreducedBigInt5( - d0=a.d0 - b.d0 - c.d0, - d1=a.d1 - b.d1 - c.d1, - d2=a.d2 - b.d2 - c.d2, - d3=a.d3 - b.d3 - c.d3, - d4=a.d4 - b.d4 - c.d4, - ), - ); - - let z_a0_red = reduce_5( - UnreducedBigInt5( - d0=b.d0 - c.d0, d1=b.d1 - c.d1, d2=b.d2 - c.d2, d3=b.d3 - c.d3, d4=b.d4 - c.d4 - ), - ); - - local res: E2 = E2(z_a0_red, z_a1_red); - - return &res; - } - func square{range_check_ptr}(x: E2*) -> E2* { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - let (a0_unreduced) = bigint_mul( - BigInt3(x.a0.d0 + x.a1.d0, x.a0.d1 + x.a1.d1, x.a0.d2 + x.a1.d2), - BigInt3(x.a0.d0 - x.a1.d0, x.a0.d1 - x.a1.d1, x.a0.d2 - x.a1.d2), - ); - - let a0 = reduce_5(a0_unreduced); - - let (a1_unreduced) = bigint_mul(x.a0, x.a1); - let a1 = reduce_5( - UnreducedBigInt5( - d0=a1_unreduced.d0 + a1_unreduced.d0, - d1=a1_unreduced.d1 + a1_unreduced.d1, - d2=a1_unreduced.d2 + a1_unreduced.d2, - d3=a1_unreduced.d3 + a1_unreduced.d3, - d4=a1_unreduced.d4 + a1_unreduced.d4, - ), - ); - - local res: E2 = E2(a0, a1); - return &res; - } - func mul_by_non_residue{range_check_ptr}(x: E2*) -> E2* { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - - tempvar b = BigInt3(x.a0.d0 * 9, x.a0.d1 * 9, x.a0.d2 * 9); - - let z_a0 = reduce_3(UnreducedBigInt3(b.d0 - x.a1.d0, b.d1 - x.a1.d1, b.d2 - x.a1.d2)); - - let z_a1 = reduce_3( - UnreducedBigInt3( - (x.a0.d0 + x.a1.d0) * 10 - b.d0 - x.a1.d0, - (x.a0.d1 + x.a1.d1) * 10 - b.d1 - x.a1.d1, - (x.a0.d2 + x.a1.d2) * 10 - b.d2 - x.a1.d2, - ), - ); - - local res: E2 = E2(z_a0, z_a1); - return &res; - } - - func mul_by_non_residue_1_power_1{range_check_ptr}(x: E2*) -> E2* { - // 
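mul_by_non_residue above multiplies by the non-residue 9 + u; its second coordinate is computed as (a0 + a1)*10 - 9*a0 - a1, which is just a0 + 9*a1 rewritten so the already-computed 9*a0 is reused. A quick check of that identity over the base field:

import random

p = 21888242871839275222246405745257275088696311157297823662689037894645226208583
a0, a1 = random.randrange(p), random.randrange(p)
b = 9 * a0
z0 = (b - a1) % p                       # real part of (a0 + a1*u) * (9 + u)
z1 = ((a0 + a1) * 10 - b - a1) % p      # the trick used by the Cairo code
assert (z0, z1) == ((9 * a0 - a1) % p, (a0 + 9 * a1) % p)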
(8376118865763821496583973867626364092589906065868298776909617916018768340080,16469823323077808223889137241176536799009286646108169935659301613961712198316) - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - - local b0: BigInt3 = BigInt3( - d0=56977543755103530214089840, - d1=76718894460847708228296868, - d2=1399212181996938186361753, - ); - - local b1: BigInt3 = BigInt3( - d0=56554577518550867416146604, - d1=62827697919520388799913531, - d2=2751247659960983775503143, - ); - - local b: E2 = E2(b0, b1); - - return e2.mul(x, &b); - } - - func mul_by_non_residue_1_power_2{range_check_ptr}(x: E2*) -> E2* { - // (21575463638280843010398324269430826099269044274347216827212613867836435027261,10307601595873709700152284273816112264069230130616436755625194854815875713954) - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - - tempvar b: E2* = new E2( - BigInt3( - d0=3867850599270032748795197, - d1=59179910958668734089937675, - d2=3604133613517150379884734, - ), - BigInt3( - d0=73280762357897828345301922, - d1=60669965255148047906141229, - d2=1721862111946328055790156, - ), - ); - - let res = e2.mul(x, b); - - return res; - } - - func mul_by_non_residue_1_power_3{range_check_ptr}(x: E2*) -> E2* { - // (2821565182194536844548159561693502659359617185244120367078079554186484126554,3505843767911556378687030309984248845540243509899259641013678093033130930403) - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - - tempvar b: E2* = new E2( - BigInt3( - d0=11303442774922253301715802, - d1=31898913143253004590495399, - d2=471336240387150903625196, - ), - BigInt3( - d0=41537096460112517495238883, - d1=27350505930295183888819774, - d2=585643468873166363848779, - ), - ); - - let res = e2.mul(x, b); - return res; - } - - func mul_by_non_residue_1_power_4{range_check_ptr}(x: E2*) -> E2* { - // (2581911344467009335267311115468803099551665605076196740867805258568234346338,19937756971775647987995932169929341994314640652964949448313374472400716661030) - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - - tempvar b: E2* = new E2( - BigInt3( - d0=25295107361554634830161762, - d1=3463420045217311122513658, - d2=431302595379882330951484, - ), - BigInt3( - d0=37209365669994046612537638, - d1=3328902638244012229372015, - d2=3330558327034787022893992, - ), - ); - - let res = e2.mul(x, b); - return res; - } - - func mul_by_non_residue_1_power_5{range_check_ptr}(x: E2*) -> E2* { - // (685108087231508774477564247770172212460312782337200605669322048753928464687,8447204650696766136447902020341177575205426561248465145919723016860428151883) - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - - tempvar b: E2* = new E2( - BigInt3( - d0=50906283942319705428551983, - d1=30858614278432769585868118, - d2=114445794884446389703587, - ), - BigInt3( - d0=17126845291756250720906315, - d1=11008385818236961457857950, - d2=1411086905581811808217083, - ), - ); - let res = e2.mul(x, b); - return res; - } - - // // MulByNonResidue2Power1 set z=x*(9,1)^(2*(p^2-1)/6) and return z - func mul_by_non_residue_2_power_1{range_check_ptr}(x: E2*) -> E2* { - // 21888242871839275220042445260109153167277707414472061641714758635765020556617 - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - - let b = BigInt3( - d0=27116970078431962302577993, - d1=47901374225073923994320622, - d2=3656382694611191768409821, - ); - let a0 = fq_bigint3.mul(x.a0, b); - let a1 = fq_bigint3.mul(x.a1, b); - tempvar res: E2* = new E2(a0, a1); - return res; - } - - // // MulByNonResidue2Power2 set z=x*(9,1)^(2*(p^2-1)/6) and return z - func 
mul_by_non_residue_2_power_2{range_check_ptr}(x: E2*) -> E2* { - // 21888242871839275220042445260109153167277707414472061641714758635765020556616 - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - - let b = BigInt3( - d0=27116970078431962302577992, - d1=47901374225073923994320622, - d2=3656382694611191768409821, - ); - let a0 = fq_bigint3.mul(x.a0, b); - let a1 = fq_bigint3.mul(x.a1, b); - tempvar res: E2* = new E2(a0, a1); - return res; - } - - func mul_by_non_residue_2_power_3{range_check_ptr}(x: E2*) -> E2* { - // 21888242871839275222246405745257275088696311157297823662689037894645226208582 - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - - let b = BigInt3( - d0=60193888514187762220203334, - d1=27625954992973055882053025, - d2=3656382694611191768777988, - ); - let a0 = fq_bigint3.mul(x.a0, b); - let a1 = fq_bigint3.mul(x.a1, b); - tempvar res: E2* = new E2(a0, a1); - return res; - } - - func mul_by_non_residue_2_power_4{range_check_ptr}(x: E2*) -> E2* { - // 2203960485148121921418603742825762020974279258880205651966 - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - - let b = BigInt3(d0=33076918435755799917625342, d1=57095833223235399068927667, d2=368166); - let a0 = fq_bigint3.mul(x.a0, b); - let a1 = fq_bigint3.mul(x.a1, b); - tempvar res: E2* = new E2(a0, a1); - return res; - } - - func mul_by_non_residue_2_power_5{range_check_ptr}(x: E2*) -> E2* { - // 2203960485148121921418603742825762020974279258880205651967 - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - - let b = BigInt3(d0=33076918435755799917625343, d1=57095833223235399068927667, d2=368166); - let a0 = fq_bigint3.mul(x.a0, b); - let a1 = fq_bigint3.mul(x.a1, b); - tempvar res: E2* = new E2(a0, a1); - return res; - } - - func mul_by_non_residue_3_power_1{range_check_ptr}(x: E2*) -> E2* { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - - // (11697423496358154304825782922584725312912383441159505038794027105778954184319,303847389135065887422783454877609941456349188919719272345083954437860409601) - - local b: E2 = E2( - BigInt3( - d0=26380520981114516168550015, - d1=2659922689139687411300089, - d2=1954028795004333741506198, - ), - BigInt3( - d0=24452053258059047520747777, - d1=71991699407877657584963167, - d2=50757036183365933362366, - ), - ); - let res = mul(x, &b); - return res; - } - func mul_by_non_residue_3_power_2{range_check_ptr}(x: E2*) -> E2* { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - - // (3772000881919853776433695186713858239009073593817195771773381919316419345261,2236595495967245188281701248203181795121068902605861227855261137820944008926) - - local b: E2 = E2( - BigInt3( - d0=49881535950925854215568237, - d1=60287325917862856540616053, - d2=630104427727001517535217, - ), - BigInt3( - d0=76342684491321466172049118, - d1=69776222374591092190805603, - d2=373618344523275288878896, - ), - ); - - let res = mul(x, &b); - return res; - } - func mul_by_non_residue_3_power_3{range_check_ptr}(x: E2*) -> E2* { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - - // (19066677689644738377698246183563772429336693972053703295610958340458742082029,18382399103927718843559375435273026243156067647398564021675359801612095278180) - - local b: E2 = E2( - BigInt3( - d0=48890445739265508918487533, - d1=73098294305056318472752890, - d2=3185046454224040865152791, - ), - BigInt3( - d0=18656792054075244724964452, - d1=275449062677871993233251, - d2=3070739225738025404929209, - ), - ); - let res = mul(x, &b); - return res; - } - func mul_by_non_residue_3_power_4{range_check_ptr}(x: E2*) -> E2* { - 
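The mul_by_non_residue_2_power_k helpers in this block get away with two plain base-field multiplications because the second-Frobenius coefficients xi^(k*(p^2-1)/6) all lie in Fp, unlike the _1_power_ and _3_power_ families whose coefficients are genuine Fp2 elements and need a full e2.mul. A sketch of that fact under the assumed standard BN254 tower (xi = 9 + u, u^2 = -1); the k = 3 case is the p - 1 constant quoted in mul_by_non_residue_2_power_3:

p = 21888242871839275222246405745257275088696311157297823662689037894645226208583

def e2_mul(x, y):
    return (x[0] * y[0] - x[1] * y[1]) % p, (x[0] * y[1] + x[1] * y[0]) % p

def e2_pow(x, e):
    # square-and-multiply in Fp2
    r = (1, 0)
    while e:
        if e & 1:
            r = e2_mul(r, x)
        x = e2_mul(x, x)
        e >>= 1
    return r

xi = (9, 1)
for k in range(1, 6):
    c = e2_pow(xi, k * (p * p - 1) // 6)
    assert c[1] == 0          # the coefficient is a pure Fp element
    if k == 3:
        assert c[0] == p - 1  # matches the ...226208582 constant in the code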
alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - - // (5324479202449903542726783395506214481928257762400643279780343368557297135718,16208900380737693084919495127334387981393726419856888799917914180988844123039) - - local b: E2 = E2( - BigInt3( - d0=58537478260502218559713382, - d1=4679104909699726279251414, - d2=889442506995496345770990, - ), - BigInt3( - d0=76874912822172478088160159, - d1=33529748033140522925695437, - d2=2707661057939728743000847, - ), - ); - let res = mul(x, &b); - return res; - } - func mul_by_non_residue_3_power_5{range_check_ptr}(x: E2*) -> E2* { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - - // (8941241848238582420466759817324047081148088512956452953208002715982955420483,10338197737521362862238855242243140895517409139741313354160881284257516364953) - local b0: BigInt3 = BigInt3( - d0=73161962261556368022838083, - d1=48248071685730948322845273, - d2=1493614729773225145209193, - ); - - local b1: BigInt3 = BigInt3( - d0=3022925535795476534142105, - d1=10430105111501082603530368, - d2=1726973129925129896852251, - ); - local b: E2 = E2(b0, b1); - - let res = mul(x, &b); - return res; - } - func assert_E2(x: E2*, z: E2*) { - assert 0 = x.a0.d0 - z.a0.d0; - assert 0 = x.a0.d1 - z.a0.d1; - assert 0 = x.a0.d2 - z.a0.d2; - assert 0 = x.a1.d0 - z.a1.d0; - assert 0 = x.a1.d1 - z.a1.d1; - assert 0 = x.a1.d2 - z.a1.d2; - return (); - } -} diff --git a/archive_tmp/bn254/towers/e6.cairo b/archive_tmp/bn254/towers/e6.cairo deleted file mode 100644 index 4e7dad35..00000000 --- a/archive_tmp/bn254/towers/e6.cairo +++ /dev/null @@ -1,1519 +0,0 @@ -from starkware.cairo.common.registers import get_fp_and_pc -from starkware.cairo.common.uint256 import SHIFT, Uint256 - -from src.bn254.towers.e2 import e2, E2 -from src.bn254.fq import ( - BigInt3, - reduce_3, - UnreducedBigInt3, - assert_reduced_felt, - reduce_5, - UnreducedBigInt5, - BASE_MIN_1, - fq_bigint3, - unrededucedUint256_to_BigInt3, - bigint_mul, - verify_zero5, - P1_256, - P0_256, -) -from src.bn254.curve import N_LIMBS, DEGREE, BASE, P0, P1, P2, CURVE -from starkware.cairo.common.cairo_builtins import PoseidonBuiltin -from starkware.cairo.common.cairo_builtins import BitwiseBuiltin -from starkware.cairo.common.poseidon_state import PoseidonBuiltinState -from starkware.cairo.common.builtin_poseidon.poseidon import poseidon_hash - -struct E6 { - b0: E2*, - b1: E2*, - b2: E2*, -} - -struct E6DirectUnreduced { - v0: UnreducedBigInt3, - v1: UnreducedBigInt3, - v2: UnreducedBigInt3, - v3: UnreducedBigInt3, - v4: UnreducedBigInt3, - v5: UnreducedBigInt3, -} - -struct E5full { - v0: Uint256, - v1: Uint256, - v2: Uint256, - v3: Uint256, - v4: Uint256, -} - -struct E6full { - v0: BigInt3, - v1: BigInt3, - v2: BigInt3, - v3: BigInt3, - v4: BigInt3, - v5: BigInt3, -} - -struct PolyAcc6 { - xy: UnreducedBigInt3, - q: E5full, - r: E6DirectUnreduced, -} - -// r is known in advance to be 1* v -struct PolyAccSquare6 { - xy: UnreducedBigInt3, - q: E5full, - r: felt, -} - -struct ZPowers5 { - z_1: BigInt3, - z_2: BigInt3, - z_3: BigInt3, - z_4: BigInt3, - z_5: BigInt3, -} - -func assert_E6full(x: E6full*, y: E6full*) { - assert 0 = x.v0.d0 - y.v0.d0; - assert 0 = x.v0.d1 - y.v0.d1; - assert 0 = x.v0.d2 - y.v0.d2; - assert 0 = x.v1.d0 - y.v1.d0; - assert 0 = x.v1.d1 - y.v1.d1; - assert 0 = x.v1.d2 - y.v1.d2; - assert 0 = x.v2.d0 - y.v2.d0; - assert 0 = x.v2.d1 - y.v2.d1; - assert 0 = x.v2.d2 - y.v2.d2; - assert 0 = x.v3.d0 - y.v3.d0; - assert 0 = x.v3.d1 - y.v3.d1; - assert 0 = x.v3.d2 - y.v3.d2; - assert 0 = x.v4.d0 - y.v4.d0; - 
assert 0 = x.v4.d1 - y.v4.d1; - assert 0 = x.v4.d2 - y.v4.d2; - assert 0 = x.v5.d0 - y.v5.d0; - assert 0 = x.v5.d1 - y.v5.d1; - assert 0 = x.v5.d2 - y.v5.d2; - return (); -} - -func mul_trick_e6{ - range_check_ptr, - bitwise_ptr: BitwiseBuiltin*, - poseidon_ptr: PoseidonBuiltin*, - z_pow1_5_ptr: ZPowers5*, - continuable_hash: felt, - poly_acc: PolyAcc6*, -}(x_ptr: E6full*, y_ptr: E6full*) -> E6full* { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - local x: E6full = [x_ptr]; - local y: E6full = [y_ptr]; - local z_pow1_5: ZPowers5 = [z_pow1_5_ptr]; - local r_v: E6full; - local q_v: E5full; - - %{ - from src.hints.e6 import mul_trick - from src.hints.fq import pack_e6d, fill_e6d - from tools.make.utils import split_128 - - x = pack_e6d(ids.x, ids.N_LIMBS, ids.BASE) - y = pack_e6d(ids.y, ids.N_LIMBS, ids.BASE) - q, r = mul_trick(x, y, ids.CURVE) - - fill_e6d(r, ids.r_v, ids.N_LIMBS, ids.BASE) - for i in range(5): - val = split_128(q[i]) - rsetattr(ids.q_v, f'v{i}.low', val[0]) - rsetattr(ids.q_v, f'v{i}.high', val[1]) - %} - - assert [range_check_ptr + 0] = r_v.v0.d0; - assert [range_check_ptr + 1] = r_v.v0.d1; - assert [range_check_ptr + 2] = r_v.v0.d2; - assert [range_check_ptr + 3] = r_v.v1.d0; - assert [range_check_ptr + 4] = r_v.v1.d1; - assert [range_check_ptr + 5] = r_v.v1.d2; - assert [range_check_ptr + 6] = r_v.v2.d0; - assert [range_check_ptr + 7] = r_v.v2.d1; - assert [range_check_ptr + 8] = r_v.v2.d2; - assert [range_check_ptr + 9] = r_v.v3.d0; - assert [range_check_ptr + 10] = r_v.v3.d1; - assert [range_check_ptr + 11] = r_v.v3.d2; - assert [range_check_ptr + 12] = r_v.v4.d0; - assert [range_check_ptr + 13] = r_v.v4.d1; - assert [range_check_ptr + 14] = r_v.v4.d2; - assert [range_check_ptr + 15] = r_v.v5.d0; - assert [range_check_ptr + 16] = r_v.v5.d1; - assert [range_check_ptr + 17] = r_v.v5.d2; - assert [range_check_ptr + 18] = q_v.v0.low; - assert [range_check_ptr + 19] = q_v.v0.high; - assert [range_check_ptr + 20] = q_v.v1.low; - assert [range_check_ptr + 21] = q_v.v1.high; - assert [range_check_ptr + 22] = q_v.v2.low; - assert [range_check_ptr + 23] = q_v.v2.high; - assert [range_check_ptr + 24] = q_v.v3.low; - assert [range_check_ptr + 25] = q_v.v3.high; - assert [range_check_ptr + 26] = q_v.v4.low; - assert [range_check_ptr + 27] = q_v.v4.high; - assert [range_check_ptr + 28] = 6 * 3 * BASE_MIN_1 - ( - r_v.v0.d0 + - r_v.v0.d1 + - r_v.v0.d2 + - r_v.v1.d0 + - r_v.v1.d1 + - r_v.v1.d2 + - r_v.v2.d0 + - r_v.v2.d1 + - r_v.v2.d2 + - r_v.v3.d0 + - r_v.v3.d1 + - r_v.v3.d2 + - r_v.v4.d0 + - r_v.v4.d1 + - r_v.v4.d2 + - r_v.v5.d0 + - r_v.v5.d1 + - r_v.v5.d2 - ); - - tempvar range_check_ptr = range_check_ptr + 29; - - tempvar two = 2; - assert poseidon_ptr.input = PoseidonBuiltinState( - s0=x.v0.d0 * x.v0.d1, s1=continuable_hash, s2=two - ); - assert poseidon_ptr[1].input = PoseidonBuiltinState( - s0=x.v0.d2 * x.v1.d0, s1=poseidon_ptr[0].output.s0, s2=two - ); - assert poseidon_ptr[2].input = PoseidonBuiltinState( - s0=x.v1.d1 * x.v1.d2, s1=poseidon_ptr[1].output.s0, s2=two - ); - assert poseidon_ptr[3].input = PoseidonBuiltinState( - s0=x.v2.d0 * x.v2.d1, s1=poseidon_ptr[2].output.s0, s2=two - ); - assert poseidon_ptr[4].input = PoseidonBuiltinState( - s0=x.v2.d2 * x.v3.d0, s1=poseidon_ptr[3].output.s0, s2=two - ); - assert poseidon_ptr[5].input = PoseidonBuiltinState( - s0=x.v3.d1 * x.v3.d2, s1=poseidon_ptr[4].output.s0, s2=two - ); - assert poseidon_ptr[6].input = PoseidonBuiltinState( - s0=x.v4.d0 * x.v4.d1, s1=poseidon_ptr[5].output.s0, s2=two - ); - assert 
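Besides the per-limb range-check cells, the single extra cell above asserts 6*3*(BASE - 1) - sum(all 18 limbs of r_v) >= 0: together with non-negativity, this bounds every limb of the hinted remainder by 18*(BASE - 1) using one cell instead of eighteen upper-bound checks. It is a deliberately loose bound; canonical reduction mod p is not enforced at this point. A plain-integer model, assuming BASE = 2**86:

BASE = 2**86   # assumed limb base

def r_limbs_loosely_bounded(limbs):
    # limbs: the 18 d_i values of r_v, each already constrained by its own
    # range-check cell to lie in [0, 2**128)
    assert len(limbs) == 18
    if any(not (0 <= l < 2**128) for l in limbs):
        return False
    # the single aggregate cell; it implies each limb <= 18 * (BASE - 1)
    return sum(limbs) <= 6 * 3 * (BASE - 1)

assert r_limbs_loosely_bounded([0] * 18)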
poseidon_ptr[7].input = PoseidonBuiltinState( - s0=x.v4.d2 * x.v5.d0, s1=poseidon_ptr[6].output.s0, s2=two - ); - assert poseidon_ptr[8].input = PoseidonBuiltinState( - s0=x.v5.d1 * x.v5.d2, s1=poseidon_ptr[7].output.s0, s2=two - ); - assert poseidon_ptr[9].input = PoseidonBuiltinState( - s0=y.v0.d0 * y.v0.d1, s1=poseidon_ptr[8].output.s0, s2=two - ); - assert poseidon_ptr[10].input = PoseidonBuiltinState( - s0=y.v0.d2 * y.v1.d0, s1=poseidon_ptr[9].output.s0, s2=two - ); - assert poseidon_ptr[11].input = PoseidonBuiltinState( - s0=y.v1.d1 * y.v1.d2, s1=poseidon_ptr[10].output.s0, s2=two - ); - assert poseidon_ptr[12].input = PoseidonBuiltinState( - s0=y.v2.d0 * y.v2.d1, s1=poseidon_ptr[11].output.s0, s2=two - ); - assert poseidon_ptr[13].input = PoseidonBuiltinState( - s0=y.v2.d2 * y.v3.d0, s1=poseidon_ptr[12].output.s0, s2=two - ); - assert poseidon_ptr[14].input = PoseidonBuiltinState( - s0=y.v3.d1 * y.v3.d2, s1=poseidon_ptr[13].output.s0, s2=two - ); - assert poseidon_ptr[15].input = PoseidonBuiltinState( - s0=y.v4.d0 * y.v4.d1, s1=poseidon_ptr[14].output.s0, s2=two - ); - assert poseidon_ptr[16].input = PoseidonBuiltinState( - s0=y.v4.d2 * y.v5.d0, s1=poseidon_ptr[15].output.s0, s2=two - ); - assert poseidon_ptr[17].input = PoseidonBuiltinState( - s0=y.v5.d1 * y.v5.d2, s1=poseidon_ptr[16].output.s0, s2=two - ); - assert poseidon_ptr[18].input = PoseidonBuiltinState( - s0=q_v.v0.low * r_v.v0.d0, s1=poseidon_ptr[17].output.s0, s2=two - ); - assert poseidon_ptr[19].input = PoseidonBuiltinState( - s0=q_v.v0.high * r_v.v0.d1, s1=poseidon_ptr[18].output.s0, s2=two - ); - assert poseidon_ptr[20].input = PoseidonBuiltinState( - s0=q_v.v1.low * r_v.v0.d2, s1=poseidon_ptr[19].output.s0, s2=two - ); - assert poseidon_ptr[21].input = PoseidonBuiltinState( - s0=q_v.v1.high * r_v.v1.d0, s1=poseidon_ptr[20].output.s0, s2=two - ); - assert poseidon_ptr[22].input = PoseidonBuiltinState( - s0=q_v.v2.low * r_v.v1.d1, s1=poseidon_ptr[21].output.s0, s2=two - ); - assert poseidon_ptr[23].input = PoseidonBuiltinState( - s0=q_v.v2.high * r_v.v1.d2, s1=poseidon_ptr[22].output.s0, s2=two - ); - assert poseidon_ptr[24].input = PoseidonBuiltinState( - s0=q_v.v3.low * r_v.v2.d0, s1=poseidon_ptr[23].output.s0, s2=two - ); - assert poseidon_ptr[25].input = PoseidonBuiltinState( - s0=q_v.v3.high * r_v.v2.d1, s1=poseidon_ptr[24].output.s0, s2=two - ); - assert poseidon_ptr[26].input = PoseidonBuiltinState( - s0=q_v.v4.low * r_v.v2.d2, s1=poseidon_ptr[25].output.s0, s2=two - ); - assert poseidon_ptr[27].input = PoseidonBuiltinState( - s0=q_v.v4.high * r_v.v3.d0, s1=poseidon_ptr[26].output.s0, s2=two - ); - assert poseidon_ptr[28].input = PoseidonBuiltinState( - s0=r_v.v3.d1 * r_v.v3.d2, s1=poseidon_ptr[27].output.s0, s2=two - ); - assert poseidon_ptr[29].input = PoseidonBuiltinState( - s0=r_v.v4.d0 * r_v.v4.d1, s1=poseidon_ptr[28].output.s0, s2=two - ); - assert poseidon_ptr[30].input = PoseidonBuiltinState( - s0=r_v.v4.d2 * r_v.v5.d0, s1=poseidon_ptr[29].output.s0, s2=two - ); - assert poseidon_ptr[31].input = PoseidonBuiltinState( - s0=r_v.v5.d1 * r_v.v5.d2, s1=poseidon_ptr[30].output.s0, s2=two - ); - - tempvar x_of_z_v1: UnreducedBigInt5 = UnreducedBigInt5( - x.v1.d0 * z_pow1_5.z_1.d0, - x.v1.d0 * z_pow1_5.z_1.d1 + x.v1.d1 * z_pow1_5.z_1.d0, - x.v1.d0 * z_pow1_5.z_1.d2 + x.v1.d1 * z_pow1_5.z_1.d1 + x.v1.d2 * z_pow1_5.z_1.d0, - x.v1.d1 * z_pow1_5.z_1.d2 + x.v1.d2 * z_pow1_5.z_1.d1, - x.v1.d2 * z_pow1_5.z_1.d2, - ); - tempvar x_of_z_v2: UnreducedBigInt5 = UnreducedBigInt5( - x.v2.d0 * z_pow1_5.z_2.d0, - x.v2.d0 * 
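The run of PoseidonBuiltinState assertions above and below builds a Fiat-Shamir transcript: each builtin instance absorbs the product of two limbs of x, y, q and r together with the running hash and the constant 2, and the last instance's outputs provide both the continued transcript value and the randomness that is later masked to BASE_MIN_1 with a bitwise AND. A structural model of that chaining; the permutation here is a stand-in (not Poseidon), only the data flow is meant to match:

import hashlib

BASE_MIN_1 = 2**86 - 1
PRIME = 2**251 + 17 * 2**192 + 1   # the Cairo felt prime

def permute(s0, s1, s2):
    # placeholder for the Poseidon builtin; only the chaining shape matters here
    h = hashlib.sha256(f"{s0},{s1},{s2}".encode()).digest()
    x = int.from_bytes(h, "big") % PRIME
    return x, (x * 3) % PRIME, (x * 7) % PRIME

def absorb_limbs(continuable_hash, limb_pairs):
    assert limb_pairs, "the transcript absorbs at least one pair"
    s0_out = s1_out = None
    for a, b in limb_pairs:
        # s0 = product of two limbs, s1 = running hash, s2 = 2, as in the Cairo code
        s0_out, s1_out, _ = permute(a * b % PRIME, continuable_hash, 2)
        continuable_hash = s0_out
    rlc_coeff = s1_out & BASE_MIN_1    # the c_i used by the accumulator
    return continuable_hash, rlc_coeff

h, c_i = absorb_limbs(0, [(3, 5), (7, 11)])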
z_pow1_5.z_2.d1 + x.v2.d1 * z_pow1_5.z_2.d0, - x.v2.d0 * z_pow1_5.z_2.d2 + x.v2.d1 * z_pow1_5.z_2.d1 + x.v2.d2 * z_pow1_5.z_2.d0, - x.v2.d1 * z_pow1_5.z_2.d2 + x.v2.d2 * z_pow1_5.z_2.d1, - x.v2.d2 * z_pow1_5.z_2.d2, - ); - tempvar x_of_z_v3: UnreducedBigInt5 = UnreducedBigInt5( - x.v3.d0 * z_pow1_5.z_3.d0, - x.v3.d0 * z_pow1_5.z_3.d1 + x.v3.d1 * z_pow1_5.z_3.d0, - x.v3.d0 * z_pow1_5.z_3.d2 + x.v3.d1 * z_pow1_5.z_3.d1 + x.v3.d2 * z_pow1_5.z_3.d0, - x.v3.d1 * z_pow1_5.z_3.d2 + x.v3.d2 * z_pow1_5.z_3.d1, - x.v3.d2 * z_pow1_5.z_3.d2, - ); - - tempvar x_of_z_v4: UnreducedBigInt5 = UnreducedBigInt5( - x.v4.d0 * z_pow1_5.z_4.d0, - x.v4.d0 * z_pow1_5.z_4.d1 + x.v4.d1 * z_pow1_5.z_4.d0, - x.v4.d0 * z_pow1_5.z_4.d2 + x.v4.d1 * z_pow1_5.z_4.d1 + x.v4.d2 * z_pow1_5.z_4.d0, - x.v4.d1 * z_pow1_5.z_4.d2 + x.v4.d2 * z_pow1_5.z_4.d1, - x.v4.d2 * z_pow1_5.z_4.d2, - ); - tempvar x_of_z_v5: UnreducedBigInt5 = UnreducedBigInt5( - x.v5.d0 * z_pow1_5.z_5.d0, - x.v5.d0 * z_pow1_5.z_5.d1 + x.v5.d1 * z_pow1_5.z_5.d0, - x.v5.d0 * z_pow1_5.z_5.d2 + x.v5.d1 * z_pow1_5.z_5.d1 + x.v5.d2 * z_pow1_5.z_5.d0, - x.v5.d1 * z_pow1_5.z_5.d2 + x.v5.d2 * z_pow1_5.z_5.d1, - x.v5.d2 * z_pow1_5.z_5.d2, - ); - - let x_of_z = reduce_5( - UnreducedBigInt5( - d0=x.v0.d0 + x_of_z_v1.d0 + x_of_z_v2.d0 + x_of_z_v3.d0 + x_of_z_v4.d0 + x_of_z_v5.d0, - d1=x.v0.d1 + x_of_z_v1.d1 + x_of_z_v2.d1 + x_of_z_v3.d1 + x_of_z_v4.d1 + x_of_z_v5.d1, - d2=x.v0.d2 + x_of_z_v1.d2 + x_of_z_v2.d2 + x_of_z_v3.d2 + x_of_z_v4.d2 + x_of_z_v5.d2, - d3=x_of_z_v1.d3 + x_of_z_v2.d3 + x_of_z_v3.d3 + x_of_z_v4.d3 + x_of_z_v5.d3, - d4=x_of_z_v1.d4 + x_of_z_v2.d4 + x_of_z_v3.d4 + x_of_z_v4.d4 + x_of_z_v5.d4, - ), - ); - - tempvar y_of_z_v1: UnreducedBigInt5 = UnreducedBigInt5( - y.v1.d0 * z_pow1_5.z_1.d0, - y.v1.d0 * z_pow1_5.z_1.d1 + y.v1.d1 * z_pow1_5.z_1.d0, - y.v1.d0 * z_pow1_5.z_1.d2 + y.v1.d1 * z_pow1_5.z_1.d1 + y.v1.d2 * z_pow1_5.z_1.d0, - y.v1.d1 * z_pow1_5.z_1.d2 + y.v1.d2 * z_pow1_5.z_1.d1, - y.v1.d2 * z_pow1_5.z_1.d2, - ); - - tempvar y_of_z_v2: UnreducedBigInt5 = UnreducedBigInt5( - y.v2.d0 * z_pow1_5.z_2.d0, - y.v2.d0 * z_pow1_5.z_2.d1 + y.v2.d1 * z_pow1_5.z_2.d0, - y.v2.d0 * z_pow1_5.z_2.d2 + y.v2.d1 * z_pow1_5.z_2.d1 + y.v2.d2 * z_pow1_5.z_2.d0, - y.v2.d1 * z_pow1_5.z_2.d2 + y.v2.d2 * z_pow1_5.z_2.d1, - y.v2.d2 * z_pow1_5.z_2.d2, - ); - - tempvar y_of_z_v3: UnreducedBigInt5 = UnreducedBigInt5( - y.v3.d0 * z_pow1_5.z_3.d0, - y.v3.d0 * z_pow1_5.z_3.d1 + y.v3.d1 * z_pow1_5.z_3.d0, - y.v3.d0 * z_pow1_5.z_3.d2 + y.v3.d1 * z_pow1_5.z_3.d1 + y.v3.d2 * z_pow1_5.z_3.d0, - y.v3.d1 * z_pow1_5.z_3.d2 + y.v3.d2 * z_pow1_5.z_3.d1, - y.v3.d2 * z_pow1_5.z_3.d2, - ); - - tempvar y_of_z_v4: UnreducedBigInt5 = UnreducedBigInt5( - y.v4.d0 * z_pow1_5.z_4.d0, - y.v4.d0 * z_pow1_5.z_4.d1 + y.v4.d1 * z_pow1_5.z_4.d0, - y.v4.d0 * z_pow1_5.z_4.d2 + y.v4.d1 * z_pow1_5.z_4.d1 + y.v4.d2 * z_pow1_5.z_4.d0, - y.v4.d1 * z_pow1_5.z_4.d2 + y.v4.d2 * z_pow1_5.z_4.d1, - y.v4.d2 * z_pow1_5.z_4.d2, - ); - - tempvar y_of_z_v5: UnreducedBigInt5 = UnreducedBigInt5( - y.v5.d0 * z_pow1_5.z_5.d0, - y.v5.d0 * z_pow1_5.z_5.d1 + y.v5.d1 * z_pow1_5.z_5.d0, - y.v5.d0 * z_pow1_5.z_5.d2 + y.v5.d1 * z_pow1_5.z_5.d1 + y.v5.d2 * z_pow1_5.z_5.d0, - y.v5.d1 * z_pow1_5.z_5.d2 + y.v5.d2 * z_pow1_5.z_5.d1, - y.v5.d2 * z_pow1_5.z_5.d2, - ); - - let y_of_z = reduce_5( - UnreducedBigInt5( - d0=y.v0.d0 + y_of_z_v1.d0 + y_of_z_v2.d0 + y_of_z_v3.d0 + y_of_z_v4.d0 + y_of_z_v5.d0, - d1=y.v0.d1 + y_of_z_v1.d1 + y_of_z_v2.d1 + y_of_z_v3.d1 + y_of_z_v4.d1 + y_of_z_v5.d1, - d2=y.v0.d2 + y_of_z_v1.d2 + y_of_z_v2.d2 + 
y_of_z_v3.d2 + y_of_z_v4.d2 + y_of_z_v5.d2, - d3=y_of_z_v1.d3 + y_of_z_v2.d3 + y_of_z_v3.d3 + y_of_z_v4.d3 + y_of_z_v5.d3, - d4=y_of_z_v1.d4 + y_of_z_v2.d4 + y_of_z_v3.d4 + y_of_z_v4.d4 + y_of_z_v5.d4, - ), - ); - - // let (xy_acc) = bigint_mul(x_of_z, y_of_z); - let xy_acc = reduce_5( - UnreducedBigInt5( - d0=x_of_z.d0 * y_of_z.d0, - d1=x_of_z.d0 * y_of_z.d1 + x_of_z.d1 * y_of_z.d0, - d2=x_of_z.d0 * y_of_z.d2 + x_of_z.d1 * y_of_z.d1 + x_of_z.d2 * y_of_z.d0, - d3=x_of_z.d1 * y_of_z.d2 + x_of_z.d2 * y_of_z.d1, - d4=x_of_z.d2 * y_of_z.d2, - ), - ); - - let poseidon_ptr = poseidon_ptr + 32 * PoseidonBuiltin.SIZE; - let continuable_hash = [poseidon_ptr - PoseidonBuiltin.SIZE].output.s0; - let random_linear_combination_coeff = [poseidon_ptr - PoseidonBuiltin.SIZE].output.s1; - - assert bitwise_ptr.x = random_linear_combination_coeff; - assert bitwise_ptr.y = BASE_MIN_1; - tempvar c_i = bitwise_ptr.x_and_y; - let bitwise_ptr = bitwise_ptr + BitwiseBuiltin.SIZE; - - local poly_acc_f: PolyAcc6 = PolyAcc6( - xy=UnreducedBigInt3( - d0=poly_acc.xy.d0 + c_i * xy_acc.d0, - d1=poly_acc.xy.d1 + c_i * xy_acc.d1, - d2=poly_acc.xy.d2 + c_i * xy_acc.d2, - ), - q=E5full( - Uint256(c_i * q_v.v0.low + poly_acc.q.v0.low, c_i * q_v.v0.high + poly_acc.q.v0.high), - Uint256(c_i * q_v.v1.low + poly_acc.q.v1.low, c_i * q_v.v1.high + poly_acc.q.v1.high), - Uint256(c_i * q_v.v2.low + poly_acc.q.v2.low, c_i * q_v.v2.high + poly_acc.q.v2.high), - Uint256(c_i * q_v.v3.low + poly_acc.q.v3.low, c_i * q_v.v3.high + poly_acc.q.v3.high), - Uint256(c_i * q_v.v4.low + poly_acc.q.v4.low, c_i * q_v.v4.high + poly_acc.q.v4.high), - ), - r=E6DirectUnreduced( - UnreducedBigInt3( - c_i * r_v.v0.d0 + poly_acc.r.v0.d0, - c_i * r_v.v0.d1 + poly_acc.r.v0.d1, - c_i * r_v.v0.d2 + poly_acc.r.v0.d2, - ), - UnreducedBigInt3( - c_i * r_v.v1.d0 + poly_acc.r.v1.d0, - c_i * r_v.v1.d1 + poly_acc.r.v1.d1, - c_i * r_v.v1.d2 + poly_acc.r.v1.d2, - ), - UnreducedBigInt3( - c_i * r_v.v2.d0 + poly_acc.r.v2.d0, - c_i * r_v.v2.d1 + poly_acc.r.v2.d1, - c_i * r_v.v2.d2 + poly_acc.r.v2.d2, - ), - UnreducedBigInt3( - c_i * r_v.v3.d0 + poly_acc.r.v3.d0, - c_i * r_v.v3.d1 + poly_acc.r.v3.d1, - c_i * r_v.v3.d2 + poly_acc.r.v3.d2, - ), - UnreducedBigInt3( - c_i * r_v.v4.d0 + poly_acc.r.v4.d0, - c_i * r_v.v4.d1 + poly_acc.r.v4.d1, - c_i * r_v.v4.d2 + poly_acc.r.v4.d2, - ), - UnreducedBigInt3( - c_i * r_v.v5.d0 + poly_acc.r.v5.d0, - c_i * r_v.v5.d1 + poly_acc.r.v5.d1, - c_i * r_v.v5.d2 + poly_acc.r.v5.d2, - ), - ), - ); - let poly_acc = &poly_acc_f; - return &r_v; -} - -func div_trick_e6{ - range_check_ptr, - bitwise_ptr: BitwiseBuiltin*, - poseidon_ptr: PoseidonBuiltin*, - z_pow1_5_ptr: ZPowers5*, - continuable_hash: felt, - poly_acc: PolyAcc6*, -}(x: E6full*, y: E6full*) -> E6full* { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - - // local div: E6full; - tempvar div_start = range_check_ptr; - let div_v0d0 = [range_check_ptr]; - let div_v0d1 = [range_check_ptr + 1]; - let div_v0d2 = [range_check_ptr + 2]; - let div_v1d0 = [range_check_ptr + 3]; - let div_v1d1 = [range_check_ptr + 4]; - let div_v1d2 = [range_check_ptr + 5]; - let div_v2d0 = [range_check_ptr + 6]; - let div_v2d1 = [range_check_ptr + 7]; - let div_v2d2 = [range_check_ptr + 8]; - let div_v3d0 = [range_check_ptr + 9]; - let div_v3d1 = [range_check_ptr + 10]; - let div_v3d2 = [range_check_ptr + 11]; - let div_v4d0 = [range_check_ptr + 12]; - let div_v4d1 = [range_check_ptr + 13]; - let div_v4d2 = [range_check_ptr + 14]; - let div_v5d0 = [range_check_ptr + 15]; - let div_v5d1 = [range_check_ptr + 
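The PolyAcc6 update above folds this multiplication into a running random linear combination: with c_i drawn from the transcript, the accumulator collects c_i * (x_i(z) * y_i(z)) together with c_i * (q_i, r_i), so that a single deferred identity check (presumably performed once after all multiplications, outside the code shown here) covers every mul_trick_e6 call at once. A reduced-arithmetic model of the accumulator (the Cairo version keeps the sums unreduced):

p = 21888242871839275222246405745257275088696311157297823662689037894645226208583

class PolyAcc6Model:
    """Running accumulator for sum_i c_i * (x_i(z)*y_i(z), q_i, r_i)."""

    def __init__(self):
        self.xy = 0
        self.q = [0] * 5    # quotient coefficients (degree <= 4)
        self.r = [0] * 6    # remainder coefficients (degree <= 5)

    def accumulate(self, c_i, xy_of_z, q, r):
        self.xy = (self.xy + c_i * xy_of_z) % p
        self.q = [(acc + c_i * qi) % p for acc, qi in zip(self.q, q)]
        self.r = [(acc + c_i * ri) % p for acc, ri in zip(self.r, r)]

acc = PolyAcc6Model()
acc.accumulate(c_i=42, xy_of_z=7, q=[1, 2, 3, 4, 5], r=[1, 2, 3, 4, 5, 6])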
16]; - let div_v5d2 = [range_check_ptr + 17]; - - %{ - from starkware.cairo.common.math_utils import as_int - from src.hints.fq import bigint_split, bigint_pack - from tools.py.extension_trick import flatten, v_to_gnark, gnark_to_v, div_e6, pack_e6 - - x, y = [], [] - for i in range(6): - x.append(bigint_pack(getattr(ids.x, 'v'+str(i)), ids.N_LIMBS, ids.BASE)) - y.append(bigint_pack(getattr(ids.y, 'v'+str(i)), ids.N_LIMBS, ids.BASE)) - - x_gnark, y_gnark = pack_e6(v_to_gnark(x)), pack_e6(v_to_gnark(y)) - z = flatten(div_e6(x_gnark, y_gnark)) - z = gnark_to_v(z) - e = [bigint_split(x, ids.N_LIMBS, ids.BASE) for x in z] - - for i in range(6): - for k in range(ids.N_LIMBS): - setattr(ids, f'div_v{i}d{k}', e[i][k]) - %} - - // assert_reduced_e6full(div); - let div: E6full* = cast(div_start, E6full*); - assert [range_check_ptr + 18] = BASE_MIN_1 - div_v0d0; - assert [range_check_ptr + 19] = BASE_MIN_1 - div_v0d1; - assert [range_check_ptr + 20] = P2 - div_v0d2; - assert [range_check_ptr + 21] = BASE_MIN_1 - div_v1d0; - assert [range_check_ptr + 22] = BASE_MIN_1 - div_v1d1; - assert [range_check_ptr + 23] = P2 - div_v1d2; - assert [range_check_ptr + 24] = BASE_MIN_1 - div_v2d0; - assert [range_check_ptr + 25] = BASE_MIN_1 - div_v2d1; - assert [range_check_ptr + 26] = P2 - div_v2d2; - assert [range_check_ptr + 27] = BASE_MIN_1 - div_v3d0; - assert [range_check_ptr + 28] = BASE_MIN_1 - div_v3d1; - assert [range_check_ptr + 29] = P2 - div_v3d2; - assert [range_check_ptr + 30] = BASE_MIN_1 - div_v4d0; - assert [range_check_ptr + 31] = BASE_MIN_1 - div_v4d1; - assert [range_check_ptr + 32] = P2 - div_v4d2; - assert [range_check_ptr + 33] = BASE_MIN_1 - div_v5d0; - assert [range_check_ptr + 34] = BASE_MIN_1 - div_v5d1; - assert [range_check_ptr + 35] = P2 - div_v5d2; - - if (div_v0d2 == P2) { - if (div_v0d1 == P1) { - assert [range_check_ptr + 36] = P0 - 1 - div_v0d0; - tempvar range_check_ptr = range_check_ptr + 37; - } else { - assert [range_check_ptr + 36] = P1 - 1 - div_v0d1; - tempvar range_check_ptr = range_check_ptr + 37; - } - } else { - tempvar range_check_ptr = range_check_ptr + 36; - } - - if (div_v1d2 == P2) { - if (div_v1d1 == P1) { - assert [range_check_ptr] = P0 - 1 - div_v1d0; - tempvar range_check_ptr = range_check_ptr + 1; - } else { - assert [range_check_ptr] = P1 - 1 - div_v1d1; - tempvar range_check_ptr = range_check_ptr + 1; - } - } else { - tempvar range_check_ptr = range_check_ptr; - } - - if (div_v2d2 == P2) { - if (div_v2d1 == P1) { - assert [range_check_ptr] = P0 - 1 - div_v2d0; - tempvar range_check_ptr = range_check_ptr + 1; - } else { - assert [range_check_ptr] = P1 - 1 - div_v2d1; - tempvar range_check_ptr = range_check_ptr + 1; - } - } else { - tempvar range_check_ptr = range_check_ptr; - } - - if (div_v3d2 == P2) { - if (div_v3d1 == P1) { - assert [range_check_ptr] = P0 - 1 - div_v3d0; - tempvar range_check_ptr = range_check_ptr + 1; - } else { - assert [range_check_ptr] = P1 - 1 - div_v3d1; - tempvar range_check_ptr = range_check_ptr + 1; - } - } else { - tempvar range_check_ptr = range_check_ptr; - } - - if (div_v4d2 == P2) { - if (div_v4d1 == P1) { - assert [range_check_ptr] = P0 - 1 - div_v4d0; - tempvar range_check_ptr = range_check_ptr + 1; - } else { - assert [range_check_ptr] = P1 - 1 - div_v4d1; - tempvar range_check_ptr = range_check_ptr + 1; - } - } else { - tempvar range_check_ptr = range_check_ptr; - } - - if (div_v5d2 == P2) { - if (div_v5d1 == P1) { - assert [range_check_ptr] = P0 - 1 - div_v5d0; - tempvar range_check_ptr = range_check_ptr + 1; - } else 
{ - assert [range_check_ptr] = P1 - 1 - div_v5d1; - tempvar range_check_ptr = range_check_ptr + 1; - } - } else { - tempvar range_check_ptr = range_check_ptr; - } - - let check = mul_trick_e6(y, div); - - assert_E6full(x, check); - - return div; -} - -namespace e6 { - func add{range_check_ptr}(x: E6*, y: E6*) -> E6* { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - let b0 = e2.add(x.b0, y.b0); - let b1 = e2.add(x.b1, y.b1); - let b2 = e2.add(x.b2, y.b2); - local res: E6 = E6(b0, b1, b2); - return &res; - } - func add_full{range_check_ptr}(x: E6full*, y: E6full*) -> E6full* { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - let v0 = fq_bigint3.add(x.v0, y.v0); - let v1 = fq_bigint3.add(x.v1, y.v1); - let v2 = fq_bigint3.add(x.v2, y.v2); - let v3 = fq_bigint3.add(x.v3, y.v3); - let v4 = fq_bigint3.add(x.v4, y.v4); - let v5 = fq_bigint3.add(x.v5, y.v5); - local res: E6full = E6full(v0, v1, v2, v3, v4, v5); - return &res; - } - - func sub{range_check_ptr}(x: E6*, y: E6*) -> E6* { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - let b0 = e2.sub(x.b0, y.b0); - let b1 = e2.sub(x.b1, y.b1); - let b2 = e2.sub(x.b2, y.b2); - local res: E6 = E6(b0, b1, b2); - return &res; - } - - func double{range_check_ptr}(x: E6*) -> E6* { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - let b0 = e2.double(x.b0); - let b1 = e2.double(x.b1); - let b2 = e2.double(x.b2); - local res: E6 = E6(b0, b1, b2); - return &res; - } - - func neg{range_check_ptr}(x: E6*) -> E6* { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - let b0 = e2.neg(x.b0); - let b1 = e2.neg(x.b1); - let b2 = e2.neg(x.b2); - local res: E6 = E6(b0, b1, b2); - return &res; - } - func neg_full{range_check_ptr}(x: E6full*) -> E6full* { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - let v0 = fq_bigint3.neg(x.v0); - let v1 = fq_bigint3.neg(x.v1); - let v2 = fq_bigint3.neg(x.v2); - let v3 = fq_bigint3.neg(x.v3); - let v4 = fq_bigint3.neg(x.v4); - let v5 = fq_bigint3.neg(x.v5); - local res: E6full = E6full(v0, v1, v2, v3, v4, v5); - return &res; - } - func mul_by_non_residue{range_check_ptr}(x: E6*) -> E6* { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - let b0 = x.b2; - let b1 = x.b0; - let b2 = x.b1; - let b0 = e2.mul_by_non_residue(b0); - local res: E6 = E6(b0, b1, b2); - return &res; - } - - func mul_by_0{range_check_ptr}(x: E6*, b0: E2*) -> E6* { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - let a = e2.mul(x.b0, b0); - - let tmp = e2.add(x.b0, x.b1); - let t1 = e2.mul(b0, tmp); - let t1 = e2.sub(t1, a); - - let tmp = e2.add(x.b0, x.b2); - let t2 = e2.mul(b0, tmp); - let t2 = e2.sub(t2, a); - - local res: E6 = E6(a, t1, t2); - return &res; - } - - func zero{}() -> E6* { - let b0 = e2.zero(); - let b1 = e2.zero(); - let b2 = e2.zero(); - tempvar res = new E6(b0, b1, b2); - return res; - } - func one{}() -> E6* { - let b0 = e2.one(); - let b1 = e2.zero(); - let b2 = e2.zero(); - tempvar res = new E6(b0, b1, b2); - return res; - } - func is_zero{}(x: E6*) -> felt { - alloc_locals; - let b0_is_zero = e2.is_zero(x.b0); - - if (b0_is_zero == 0) { - return 0; - } - let b1_is_zero = e2.is_zero(x.b1); - - if (b1_is_zero == 0) { - return 0; - } - let b2_is_zero = e2.is_zero(x.b2); - return b2_is_zero; - } - func is_zero_full{}(x: E6full*) -> felt { - if (x.v0.d0 != 0) { - return 0; - } - if (x.v0.d1 != 0) { - return 0; - } - if (x.v0.d2 != 0) { - return 0; - } - if (x.v1.d0 != 0) { - return 0; - } - if (x.v1.d1 != 0) { - return 0; - } - if (x.v1.d2 != 0) { - return 0; - } - if (x.v2.d0 != 0) { - return 0; - } - 
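// The remaining limbs (v2.d1 through v5.d2) are checked the same way below;
// the function returns 1 only when every limb of every coordinate is zero.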
if (x.v2.d1 != 0) { - return 0; - } - if (x.v2.d2 != 0) { - return 0; - } - if (x.v3.d0 != 0) { - return 0; - } - if (x.v3.d1 != 0) { - return 0; - } - if (x.v3.d2 != 0) { - return 0; - } - if (x.v4.d0 != 0) { - return 0; - } - if (x.v4.d1 != 0) { - return 0; - } - if (x.v4.d2 != 0) { - return 0; - } - if (x.v5.d0 != 0) { - return 0; - } - if (x.v5.d1 != 0) { - return 0; - } - if (x.v5.d2 != 0) { - return 0; - } - return 1; - } - func assert_E6(x: E6*, z: E6*) { - e2.assert_E2(x.b0, z.b0); - e2.assert_E2(x.b1, z.b1); - e2.assert_E2(x.b2, z.b2); - return (); - } - // FrobeniusTorus raises a compressed elements x ∈ E6 to the modulus p - // and returns x^p / v^((p-1)/2) - func frobenius_torus{range_check_ptr}(x: E6*) -> E6* { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - - let t0 = e2.conjugate(x.b0); - let t1 = e2.conjugate(x.b1); - let t2 = e2.conjugate(x.b2); - - let t1 = e2.mul_by_non_residue_1_power_2(t1); - let t2 = e2.mul_by_non_residue_1_power_4(t2); - - local v0_a0: BigInt3 = BigInt3( - 13419658832840509084547896, 24313674309344809517854541, 3101566081603796213633544 - ); - local v0_a1: BigInt3 = BigInt3( - 28091364253695942324804508, 36789956481330324667102661, 955892070833573926637211 - ); - local v0: E2 = E2(v0_a0, v0_a1); - - local res_tmp: E6 = E6(t0, t1, t2); - let res = mul_by_0(&res_tmp, &v0); - - return res; - } - // Todo : Try to derive complete formulas and avoid conversion - func frobenius_torus_full{range_check_ptr}(x: E6full*) -> E6full* { - alloc_locals; - let x_gnark = v_to_gnark_reduced([x]); - let frobenius = frobenius_torus(x_gnark); - let res = gnark_to_v(frobenius); - return res; - } - // FrobeniusSquareTorus raises a compressed elements x ∈ E6 to the square modulus p^2 - // and returns x^(p^2) / v^((p^2-1)/2) - // func frobenius_square_torus{range_check_ptr}(x: E6*) -> E6* { - // alloc_locals; - // let (__fp__, _) = get_fp_and_pc(); - - // local v0: BigInt3 = BigInt3(33076918435755799917625343, 57095833223235399068927667, 368166); - // let t0 = e2.mul_by_element(&v0, x.b0); - // let t1 = e2.mul_by_non_residue_2_power_2(x.b1); - // let t1 = e2.mul_by_element(&v0, t1); - // let t2 = e2.mul_by_non_residue_2_power_4(x.b2); - // let t2 = e2.mul_by_element(&v0, t2); - - // local res: E6 = E6(t0, t1, t2); - // return &res; - // } - - // FrobeniusSquareTorus raises a compressed elements x ∈ E6 to the square modulus p^2 - // and returns x^(p^2) / v^((p^2-1)/2) - func frobenius_square_torus_full{range_check_ptr}(x: E6full*) -> E6full* { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - // v0 = 2203960485148121921418603742825762020974279258880205651967 - // v0*nr2p2 = 21888242871839275220042445260109153167277707414472061641714758635765020556617 - // v0*nr2p4 = 21888242871839275222246405745257275088696311157297823662689037894645226208582 - - let v0 = fq_bigint3.mul( - x.v0, BigInt3(33076918435755799917625343, 57095833223235399068927667, 368166) - ); - let v1 = fq_bigint3.mul( - x.v1, - BigInt3( - 27116970078431962302577993, 47901374225073923994320622, 3656382694611191768409821 - ), - ); // * nr2p2 / v^((p^2-1)/2) - let v2 = fq_bigint3.mul( - x.v2, - BigInt3( - 60193888514187762220203334, 27625954992973055882053025, 3656382694611191768777988 - ), - ); // * nr2p4 / v^((p^2-1)/2) - - let v3 = fq_bigint3.mul( - x.v3, BigInt3(33076918435755799917625343, 57095833223235399068927667, 368166) - ); // * 1 / v^((p^2-1)/2) - - let v4 = fq_bigint3.mul( - x.v4, - BigInt3( - 27116970078431962302577993, 47901374225073923994320622, 3656382694611191768409821 - ), - ); // * 
nr2p2 / v^((p^2-1)/2) - - let v5 = fq_bigint3.mul( - x.v5, - BigInt3( - 60193888514187762220203334, 27625954992973055882053025, 3656382694611191768777988 - ), - ); // * nr2p4 / v^((p^2-1)/2) - - local res: E6full = E6full(v0, v1, v2, v3, v4, v5); - return &res; - } - - // FrobeniusCubeTorus raises a compressed elements y ∈ E6 to the cube modulus p^3 - // and returns y^(p^3) / v^((p^3-1)/2) - func frobenius_cube_torus{range_check_ptr}(x: E6*) -> E6* { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - - let t0 = e2.conjugate(x.b0); - let t1 = e2.conjugate(x.b1); - let t2 = e2.conjugate(x.b2); - - let t1 = e2.mul_by_non_residue_3_power_2(t1); - let t2 = e2.mul_by_non_residue_3_power_4(t2); - - local v0_a0: BigInt3 = BigInt3( - 33813367533073246051653320, 24966032303833368470752936, 1702353899606858027271790 - ); - - local v0_a1: BigInt3 = BigInt3( - 24452053258059047520747777, 71991699407877657584963167, 50757036183365933362366 - ); - - local v0: E2 = E2(v0_a0, v0_a1); - - local res_tmp: E6 = E6(t0, t1, t2); - let res = mul_by_0(&res_tmp, &v0); - - return res; - } - // Todo : Try to derive complete formulas and avoid conversion - func frobenius_cube_torus_full{range_check_ptr}(x: E6full*) -> E6full* { - alloc_locals; - let x_gnark = v_to_gnark_reduced([x]); - let frobenius = frobenius_cube_torus(x_gnark); - let res = gnark_to_v(frobenius); - return res; - } - - func mul_torus{ - range_check_ptr, - bitwise_ptr: BitwiseBuiltin*, - poseidon_ptr: PoseidonBuiltin*, - z_pow1_5_ptr: ZPowers5*, - continuable_hash: felt, - poly_acc: PolyAcc6*, - }(y1: E6full*, y2: E6full*) -> E6full* { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - // let num = mul_plus_one_b1(y1, y2); - let num_min_v = mul_trick_e6(y1, y2); - local num: E6full = E6full( - num_min_v.v0, - BigInt3(num_min_v.v1.d0 + 1, num_min_v.v1.d1, num_min_v.v1.d2), - num_min_v.v2, - num_min_v.v3, - num_min_v.v4, - num_min_v.v5, - ); - // let den = add(y1, y2); - local den: E6full = E6full( - BigInt3(y1.v0.d0 + y2.v0.d0, y1.v0.d1 + y2.v0.d1, y1.v0.d2 + y2.v0.d2), - BigInt3(y1.v1.d0 + y2.v1.d0, y1.v1.d1 + y2.v1.d1, y1.v1.d2 + y2.v1.d2), - BigInt3(y1.v2.d0 + y2.v2.d0, y1.v2.d1 + y2.v2.d1, y1.v2.d2 + y2.v2.d2), - BigInt3(y1.v3.d0 + y2.v3.d0, y1.v3.d1 + y2.v3.d1, y1.v3.d2 + y2.v3.d2), - BigInt3(y1.v4.d0 + y2.v4.d0, y1.v4.d1 + y2.v4.d1, y1.v4.d2 + y2.v4.d2), - BigInt3(y1.v5.d0 + y2.v5.d0, y1.v5.d1 + y2.v5.d1, y1.v5.d2 + y2.v5.d2), - ); - let res = div_trick_e6(&num, &den); - - return res; - } - - func expt_torus{ - range_check_ptr, - bitwise_ptr: BitwiseBuiltin*, - poseidon_ptr: PoseidonBuiltin*, - z_pow1_5_ptr: ZPowers5*, - continuable_hash: felt, - poly_acc_sq: PolyAccSquare6*, - poly_acc: PolyAcc6*, - }(x: E6full*) -> E6full* { - alloc_locals; - let t3 = square_torus(x); - let t5 = square_torus(t3); - let result = square_torus(t5); - let t0 = square_torus(result); - let t2 = mul_torus(x, t0); - let t0 = mul_torus(t3, t2); - let t1 = mul_torus(x, t0); - let t4 = mul_torus(result, t2); - let t6 = square_torus(t2); - let t1 = mul_torus(t0, t1); - let t0 = mul_torus(t3, t1); - let t6 = n_square_torus(t6, 6); - let t5 = mul_torus(t5, t6); - let t5 = mul_torus(t4, t5); - let t5 = n_square_torus(t5, 7); - let t4 = mul_torus(t4, t5); - let t4 = n_square_torus(t4, 8); - let t4 = mul_torus(t0, t4); - let t3 = mul_torus(t3, t4); - let t3 = n_square_torus(t3, 6); - let t2 = mul_torus(t2, t3); - let t2 = n_square_torus(t2, 8); - let t2 = mul_torus(t0, t2); - let t2 = n_square_torus(t2, 6); - let t2 = mul_torus(t0, t2); - let t2 = n_square_torus(t2, 
10); - let t1 = mul_torus(t1, t2); - let t1 = n_square_torus(t1, 6); - let t0 = mul_torus(t0, t1); - let z = mul_torus(result, t0); - return z; - } - - func inverse_torus{range_check_ptr}(x: E6full*) -> E6full* { - return neg_full(x); - } - - func square_torus{ - range_check_ptr, - bitwise_ptr: BitwiseBuiltin*, - poseidon_ptr: PoseidonBuiltin*, - z_pow1_5_ptr: ZPowers5*, - continuable_hash: felt, - poly_acc_sq: PolyAccSquare6*, - }(x_ptr: E6full*) -> E6full* { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - local x: E6full = [x_ptr]; - local z_pow1_5: ZPowers5 = [z_pow1_5_ptr]; - local sq: E6full; - local q_v: E5full; - tempvar two = 2; - - %{ - from starkware.cairo.common.math_utils import as_int - from tools.py.extension_trick import flatten, v_to_gnark, gnark_to_v, square_torus_e6 - x=6*[0] - x_refs = [ids.x.v0, ids.x.v1, ids.x.v2, ids.x.v3, ids.x.v4, ids.x.v5] - for i in range(ids.N_LIMBS): - for k in range(6): - x[k] += as_int(getattr(x_refs[k], 'd'+str(i)), PRIME) * ids.BASE**i - x_gnark = pack_e6(v_to_gnark(x)) - - z = gnark_to_v(flatten(square_torus_e6(x_gnark))) - for i, e in enumerate(z): - bigint_fill(e, getattr(ids.sq, 'v'+str(i)), ids.N_LIMBS, ids.BASE) - %} - tempvar v_tmp: E6full = E6full( - BigInt3(two * sq.v0.d0 - x.v0.d0, two * sq.v0.d1 - x.v0.d1, two * sq.v0.d2 - x.v0.d2), - BigInt3(two * sq.v1.d0 - x.v1.d0, two * sq.v1.d1 - x.v1.d1, two * sq.v1.d2 - x.v1.d2), - BigInt3(two * sq.v2.d0 - x.v2.d0, two * sq.v2.d1 - x.v2.d1, two * sq.v2.d2 - x.v2.d2), - BigInt3(two * sq.v3.d0 - x.v3.d0, two * sq.v3.d1 - x.v3.d1, two * sq.v3.d2 - x.v3.d2), - BigInt3(two * sq.v4.d0 - x.v4.d0, two * sq.v4.d1 - x.v4.d1, two * sq.v4.d2 - x.v4.d2), - BigInt3(two * sq.v5.d0 - x.v5.d0, two * sq.v5.d1 - x.v5.d1, two * sq.v5.d2 - x.v5.d2), - ); - %{ - from src.hints.e6 import mul_trick - from src.hints.fq import pack_e6d - from tools.make.utils import split_128 - - x = pack_e6d(ids.v_tmp, ids.N_LIMBS, ids.BASE) - y = pack_e6d(ids.x, ids.N_LIMBS, ids.BASE) - q, r = mul_trick(x, y, ids.CURVE) - - for i in range(5): - val = split_128(q[i]) - rsetattr(ids.q_v, f'v{i}.low', val[0]) - rsetattr(ids.q_v, f'v{i}.high', val[1]) - %} - assert [range_check_ptr + 0] = sq.v0.d0; - assert [range_check_ptr + 1] = sq.v0.d1; - assert [range_check_ptr + 2] = sq.v0.d2; - assert [range_check_ptr + 3] = sq.v1.d0; - assert [range_check_ptr + 4] = sq.v1.d1; - assert [range_check_ptr + 5] = sq.v1.d2; - assert [range_check_ptr + 6] = sq.v2.d0; - assert [range_check_ptr + 7] = sq.v2.d1; - assert [range_check_ptr + 8] = sq.v2.d2; - assert [range_check_ptr + 9] = sq.v3.d0; - assert [range_check_ptr + 10] = sq.v3.d1; - assert [range_check_ptr + 11] = sq.v3.d2; - assert [range_check_ptr + 12] = sq.v4.d0; - assert [range_check_ptr + 13] = sq.v4.d1; - assert [range_check_ptr + 14] = sq.v4.d2; - assert [range_check_ptr + 15] = sq.v5.d0; - assert [range_check_ptr + 16] = sq.v5.d1; - assert [range_check_ptr + 17] = sq.v5.d2; - assert [range_check_ptr + 18] = q_v.v0.low; - assert [range_check_ptr + 19] = q_v.v0.high; - assert [range_check_ptr + 20] = q_v.v1.low; - assert [range_check_ptr + 21] = q_v.v1.high; - assert [range_check_ptr + 22] = q_v.v2.low; - assert [range_check_ptr + 23] = q_v.v2.high; - assert [range_check_ptr + 24] = q_v.v3.low; - assert [range_check_ptr + 25] = q_v.v3.high; - assert [range_check_ptr + 26] = q_v.v4.low; - assert [range_check_ptr + 27] = q_v.v4.high; - assert [range_check_ptr + 28] = 6 * 3 * BASE_MIN_1 - ( - sq.v0.d0 + - sq.v0.d1 + - sq.v0.d2 + - sq.v1.d0 + - sq.v1.d1 + - sq.v1.d2 + - sq.v2.d0 + - 
sq.v2.d1 + - sq.v2.d2 + - sq.v3.d0 + - sq.v3.d1 + - sq.v3.d2 + - sq.v4.d0 + - sq.v4.d1 + - sq.v4.d2 + - sq.v5.d0 + - sq.v5.d1 + - sq.v5.d2 - ); - - tempvar range_check_ptr = range_check_ptr + 29; - - assert poseidon_ptr.input = PoseidonBuiltinState( - s0=v_tmp.v0.d0 * v_tmp.v0.d1, s1=continuable_hash, s2=two - ); - assert poseidon_ptr[1].input = PoseidonBuiltinState( - s0=v_tmp.v0.d2 * v_tmp.v1.d0, s1=poseidon_ptr[0].output.s0, s2=two - ); - assert poseidon_ptr[2].input = PoseidonBuiltinState( - s0=v_tmp.v1.d1 * v_tmp.v1.d2, s1=poseidon_ptr[1].output.s0, s2=two - ); - - assert poseidon_ptr[3].input = PoseidonBuiltinState( - s0=v_tmp.v2.d0 * v_tmp.v2.d1, s1=poseidon_ptr[2].output.s0, s2=two - ); - - assert poseidon_ptr[4].input = PoseidonBuiltinState( - s0=v_tmp.v2.d2 * v_tmp.v3.d0, s1=poseidon_ptr[3].output.s0, s2=two - ); - - assert poseidon_ptr[5].input = PoseidonBuiltinState( - s0=v_tmp.v3.d1 * v_tmp.v3.d2, s1=poseidon_ptr[4].output.s0, s2=two - ); - - assert poseidon_ptr[6].input = PoseidonBuiltinState( - s0=v_tmp.v4.d0 * v_tmp.v4.d1, s1=poseidon_ptr[5].output.s0, s2=two - ); - - assert poseidon_ptr[7].input = PoseidonBuiltinState( - s0=v_tmp.v4.d2 * v_tmp.v5.d0, s1=poseidon_ptr[6].output.s0, s2=two - ); - - assert poseidon_ptr[8].input = PoseidonBuiltinState( - s0=v_tmp.v5.d1 * v_tmp.v5.d2, s1=poseidon_ptr[7].output.s0, s2=two - ); - assert poseidon_ptr[9].input = PoseidonBuiltinState( - s0=x.v0.d0 * q_v.v0.low, s1=poseidon_ptr[8].output.s0, s2=two - ); - - assert poseidon_ptr[10].input = PoseidonBuiltinState( - s0=x.v0.d1 * q_v.v0.high, s1=poseidon_ptr[9].output.s0, s2=two - ); - - assert poseidon_ptr[11].input = PoseidonBuiltinState( - s0=x.v0.d2 * q_v.v1.low, s1=poseidon_ptr[10].output.s0, s2=two - ); - - assert poseidon_ptr[12].input = PoseidonBuiltinState( - s0=x.v1.d0 * q_v.v1.high, s1=poseidon_ptr[11].output.s0, s2=two - ); - - assert poseidon_ptr[13].input = PoseidonBuiltinState( - s0=x.v1.d1 * q_v.v2.low, s1=poseidon_ptr[12].output.s0, s2=two - ); - - assert poseidon_ptr[14].input = PoseidonBuiltinState( - s0=x.v1.d2 * q_v.v2.high, s1=poseidon_ptr[13].output.s0, s2=two - ); - - assert poseidon_ptr[15].input = PoseidonBuiltinState( - s0=x.v2.d0 * q_v.v3.low, s1=poseidon_ptr[14].output.s0, s2=two - ); - - assert poseidon_ptr[16].input = PoseidonBuiltinState( - s0=x.v2.d1 * q_v.v3.high, s1=poseidon_ptr[15].output.s0, s2=two - ); - - assert poseidon_ptr[17].input = PoseidonBuiltinState( - s0=x.v2.d2 * q_v.v4.low, s1=poseidon_ptr[16].output.s0, s2=two - ); - assert poseidon_ptr[18].input = PoseidonBuiltinState( - s0=x.v3.d0 * q_v.v4.high, s1=poseidon_ptr[17].output.s0, s2=two - ); - assert poseidon_ptr[19].input = PoseidonBuiltinState( - s0=x.v3.d1 * x.v3.d2, s1=poseidon_ptr[18].output.s0, s2=two - ); - assert poseidon_ptr[20].input = PoseidonBuiltinState( - s0=x.v4.d0 * x.v4.d1, s1=poseidon_ptr[19].output.s0, s2=two - ); - assert poseidon_ptr[21].input = PoseidonBuiltinState( - s0=x.v4.d2 * x.v5.d0, s1=poseidon_ptr[20].output.s0, s2=two - ); - assert poseidon_ptr[22].input = PoseidonBuiltinState( - s0=x.v5.d1 * x.v5.d2, s1=poseidon_ptr[21].output.s0, s2=two - ); - - tempvar x_of_z_v1 = UnreducedBigInt5( - d0=x.v1.d0 * z_pow1_5.z_1.d0, - d1=x.v1.d0 * z_pow1_5.z_1.d1 + x.v1.d1 * z_pow1_5.z_1.d0, - d2=x.v1.d0 * z_pow1_5.z_1.d2 + x.v1.d1 * z_pow1_5.z_1.d1 + x.v1.d2 * z_pow1_5.z_1.d0, - d3=x.v1.d1 * z_pow1_5.z_1.d2 + x.v1.d2 * z_pow1_5.z_1.d1, - d4=x.v1.d2 * z_pow1_5.z_1.d2, - ); - - tempvar x_of_z_v2 = UnreducedBigInt5( - d0=x.v2.d0 * z_pow1_5.z_2.d0, - d1=x.v2.d0 * z_pow1_5.z_2.d1 + 
x.v2.d1 * z_pow1_5.z_2.d0, - d2=x.v2.d0 * z_pow1_5.z_2.d2 + x.v2.d1 * z_pow1_5.z_2.d1 + x.v2.d2 * z_pow1_5.z_2.d0, - d3=x.v2.d1 * z_pow1_5.z_2.d2 + x.v2.d2 * z_pow1_5.z_2.d1, - d4=x.v2.d2 * z_pow1_5.z_2.d2, - ); - - tempvar x_of_z_v3 = UnreducedBigInt5( - d0=x.v3.d0 * z_pow1_5.z_3.d0, - d1=x.v3.d0 * z_pow1_5.z_3.d1 + x.v3.d1 * z_pow1_5.z_3.d0, - d2=x.v3.d0 * z_pow1_5.z_3.d2 + x.v3.d1 * z_pow1_5.z_3.d1 + x.v3.d2 * z_pow1_5.z_3.d0, - d3=x.v3.d1 * z_pow1_5.z_3.d2 + x.v3.d2 * z_pow1_5.z_3.d1, - d4=x.v3.d2 * z_pow1_5.z_3.d2, - ); - - tempvar x_of_z_v4 = UnreducedBigInt5( - d0=x.v4.d0 * z_pow1_5.z_4.d0, - d1=x.v4.d0 * z_pow1_5.z_4.d1 + x.v4.d1 * z_pow1_5.z_4.d0, - d2=x.v4.d0 * z_pow1_5.z_4.d2 + x.v4.d1 * z_pow1_5.z_4.d1 + x.v4.d2 * z_pow1_5.z_4.d0, - d3=x.v4.d1 * z_pow1_5.z_4.d2 + x.v4.d2 * z_pow1_5.z_4.d1, - d4=x.v4.d2 * z_pow1_5.z_4.d2, - ); - - tempvar x_of_z_v5 = UnreducedBigInt5( - d0=x.v5.d0 * z_pow1_5.z_5.d0, - d1=x.v5.d0 * z_pow1_5.z_5.d1 + x.v5.d1 * z_pow1_5.z_5.d0, - d2=x.v5.d0 * z_pow1_5.z_5.d2 + x.v5.d1 * z_pow1_5.z_5.d1 + x.v5.d2 * z_pow1_5.z_5.d0, - d3=x.v5.d1 * z_pow1_5.z_5.d2 + x.v5.d2 * z_pow1_5.z_5.d1, - d4=x.v5.d2 * z_pow1_5.z_5.d2, - ); - - let x_of_z = reduce_5( - UnreducedBigInt5( - d0=x.v0.d0 + x_of_z_v1.d0 + x_of_z_v2.d0 + x_of_z_v3.d0 + x_of_z_v4.d0 + - x_of_z_v5.d0, - d1=x.v0.d1 + x_of_z_v1.d1 + x_of_z_v2.d1 + x_of_z_v3.d1 + x_of_z_v4.d1 + - x_of_z_v5.d1, - d2=x.v0.d2 + x_of_z_v1.d2 + x_of_z_v2.d2 + x_of_z_v3.d2 + x_of_z_v4.d2 + - x_of_z_v5.d2, - d3=x_of_z_v1.d3 + x_of_z_v2.d3 + x_of_z_v3.d3 + x_of_z_v4.d3 + x_of_z_v5.d3, - d4=x_of_z_v1.d4 + x_of_z_v2.d4 + x_of_z_v3.d4 + x_of_z_v4.d4 + x_of_z_v5.d4, - ), - ); - - tempvar y_of_z_v1 = UnreducedBigInt5( - d0=v_tmp.v1.d0 * z_pow1_5.z_1.d0, - d1=v_tmp.v1.d0 * z_pow1_5.z_1.d1 + v_tmp.v1.d1 * z_pow1_5.z_1.d0, - d2=v_tmp.v1.d0 * z_pow1_5.z_1.d2 + v_tmp.v1.d1 * z_pow1_5.z_1.d1 + v_tmp.v1.d2 * - z_pow1_5.z_1.d0, - d3=v_tmp.v1.d1 * z_pow1_5.z_1.d2 + v_tmp.v1.d2 * z_pow1_5.z_1.d1, - d4=v_tmp.v1.d2 * z_pow1_5.z_1.d2, - ); - - tempvar y_of_z_v2 = UnreducedBigInt5( - d0=v_tmp.v2.d0 * z_pow1_5.z_2.d0, - d1=v_tmp.v2.d0 * z_pow1_5.z_2.d1 + v_tmp.v2.d1 * z_pow1_5.z_2.d0, - d2=v_tmp.v2.d0 * z_pow1_5.z_2.d2 + v_tmp.v2.d1 * z_pow1_5.z_2.d1 + v_tmp.v2.d2 * - z_pow1_5.z_2.d0, - d3=v_tmp.v2.d1 * z_pow1_5.z_2.d2 + v_tmp.v2.d2 * z_pow1_5.z_2.d1, - d4=v_tmp.v2.d2 * z_pow1_5.z_2.d2, - ); - - tempvar y_of_z_v3 = UnreducedBigInt5( - d0=v_tmp.v3.d0 * z_pow1_5.z_3.d0, - d1=v_tmp.v3.d0 * z_pow1_5.z_3.d1 + v_tmp.v3.d1 * z_pow1_5.z_3.d0, - d2=v_tmp.v3.d0 * z_pow1_5.z_3.d2 + v_tmp.v3.d1 * z_pow1_5.z_3.d1 + v_tmp.v3.d2 * - z_pow1_5.z_3.d0, - d3=v_tmp.v3.d1 * z_pow1_5.z_3.d2 + v_tmp.v3.d2 * z_pow1_5.z_3.d1, - d4=v_tmp.v3.d2 * z_pow1_5.z_3.d2, - ); - - tempvar y_of_z_v4 = UnreducedBigInt5( - d0=v_tmp.v4.d0 * z_pow1_5.z_4.d0, - d1=v_tmp.v4.d0 * z_pow1_5.z_4.d1 + v_tmp.v4.d1 * z_pow1_5.z_4.d0, - d2=v_tmp.v4.d0 * z_pow1_5.z_4.d2 + v_tmp.v4.d1 * z_pow1_5.z_4.d1 + v_tmp.v4.d2 * - z_pow1_5.z_4.d0, - d3=v_tmp.v4.d1 * z_pow1_5.z_4.d2 + v_tmp.v4.d2 * z_pow1_5.z_4.d1, - d4=v_tmp.v4.d2 * z_pow1_5.z_4.d2, - ); - - tempvar y_of_z_v5 = UnreducedBigInt5( - d0=v_tmp.v5.d0 * z_pow1_5.z_5.d0, - d1=v_tmp.v5.d0 * z_pow1_5.z_5.d1 + v_tmp.v5.d1 * z_pow1_5.z_5.d0, - d2=v_tmp.v5.d0 * z_pow1_5.z_5.d2 + v_tmp.v5.d1 * z_pow1_5.z_5.d1 + v_tmp.v5.d2 * - z_pow1_5.z_5.d0, - d3=v_tmp.v5.d1 * z_pow1_5.z_5.d2 + v_tmp.v5.d2 * z_pow1_5.z_5.d1, - d4=v_tmp.v5.d2 * z_pow1_5.z_5.d2, - ); - - let y_of_z = reduce_5( - UnreducedBigInt5( - d0=v_tmp.v0.d0 + y_of_z_v1.d0 + y_of_z_v2.d0 + y_of_z_v3.d0 + 
y_of_z_v4.d0 + - y_of_z_v5.d0, - d1=v_tmp.v0.d1 + y_of_z_v1.d1 + y_of_z_v2.d1 + y_of_z_v3.d1 + y_of_z_v4.d1 + - y_of_z_v5.d1, - d2=v_tmp.v0.d2 + y_of_z_v1.d2 + y_of_z_v2.d2 + y_of_z_v3.d2 + y_of_z_v4.d2 + - y_of_z_v5.d2, - d3=y_of_z_v1.d3 + y_of_z_v2.d3 + y_of_z_v3.d3 + y_of_z_v4.d3 + y_of_z_v5.d3, - d4=y_of_z_v1.d4 + y_of_z_v2.d4 + y_of_z_v3.d4 + y_of_z_v4.d4 + y_of_z_v5.d4, - ), - ); - let xy_acc = reduce_5( - UnreducedBigInt5( - d0=x_of_z.d0 * y_of_z.d0, - d1=x_of_z.d0 * y_of_z.d1 + x_of_z.d1 * y_of_z.d0, - d2=x_of_z.d0 * y_of_z.d2 + x_of_z.d1 * y_of_z.d1 + x_of_z.d2 * y_of_z.d0, - d3=x_of_z.d1 * y_of_z.d2 + x_of_z.d2 * y_of_z.d1, - d4=x_of_z.d2 * y_of_z.d2, - ), - ); - - tempvar poseidon_ptr = poseidon_ptr + PoseidonBuiltin.SIZE * 23; - let continuable_hash = [poseidon_ptr - PoseidonBuiltin.SIZE].output.s0; - let random_linear_combination_coeff = [poseidon_ptr - PoseidonBuiltin.SIZE].output.s1; - - assert bitwise_ptr.x = random_linear_combination_coeff; - assert bitwise_ptr.y = BASE_MIN_1; - tempvar c_i = bitwise_ptr.x_and_y; - let bitwise_ptr = bitwise_ptr + BitwiseBuiltin.SIZE; - - local poly_acc_sqf: PolyAccSquare6 = PolyAccSquare6( - xy=UnreducedBigInt3( - d0=poly_acc_sq.xy.d0 + c_i * xy_acc.d0, - d1=poly_acc_sq.xy.d1 + c_i * xy_acc.d1, - d2=poly_acc_sq.xy.d2 + c_i * xy_acc.d2, - ), - q=E5full( - Uint256( - c_i * q_v.v0.low + poly_acc_sq.q.v0.low, - c_i * q_v.v0.high + poly_acc_sq.q.v0.high, - ), - Uint256( - c_i * q_v.v1.low + poly_acc_sq.q.v1.low, - c_i * q_v.v1.high + poly_acc_sq.q.v1.high, - ), - Uint256( - c_i * q_v.v2.low + poly_acc_sq.q.v2.low, - c_i * q_v.v2.high + poly_acc_sq.q.v2.high, - ), - Uint256( - c_i * q_v.v3.low + poly_acc_sq.q.v3.low, - c_i * q_v.v3.high + poly_acc_sq.q.v3.high, - ), - Uint256( - c_i * q_v.v4.low + poly_acc_sq.q.v4.low, - c_i * q_v.v4.high + poly_acc_sq.q.v4.high, - ), - ), - r=poly_acc_sq.r + c_i, - ); - let poly_acc_sq = &poly_acc_sqf; - - return &sq; - } - - func n_square_torus{ - range_check_ptr, - bitwise_ptr: BitwiseBuiltin*, - poseidon_ptr: PoseidonBuiltin*, - z_pow1_5_ptr: ZPowers5*, - continuable_hash: felt, - poly_acc_sq: PolyAccSquare6*, - }(x: E6full*, n: felt) -> E6full* { - if (n == 0) { - return x; - } else { - let res = square_torus(x); - return n_square_torus(res, n - 1); - } - } -} - -func gnark_to_v{range_check_ptr}(x: E6*) -> E6full* { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - - local res: E6full = E6full( - BigInt3( - x.b0.a0.d0 - 9 * x.b0.a1.d0, x.b0.a0.d1 - 9 * x.b0.a1.d1, x.b0.a0.d2 - 9 * x.b0.a1.d2 - ), - BigInt3( - x.b1.a0.d0 - 9 * x.b1.a1.d0, x.b1.a0.d1 - 9 * x.b1.a1.d1, x.b1.a0.d2 - 9 * x.b1.a1.d2 - ), - BigInt3( - x.b2.a0.d0 - 9 * x.b2.a1.d0, x.b2.a0.d1 - 9 * x.b2.a1.d1, x.b2.a0.d2 - 9 * x.b2.a1.d2 - ), - BigInt3(x.b0.a1.d0, x.b0.a1.d1, x.b0.a1.d2), - BigInt3(x.b1.a1.d0, x.b1.a1.d1, x.b1.a1.d2), - BigInt3(x.b2.a1.d0, x.b2.a1.d1, x.b2.a1.d2), - ); - return &res; -} -func gnark_to_v_reduced{range_check_ptr}(x: E6*) -> E6full* { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - - let v0 = reduce_3( - UnreducedBigInt3( - x.b0.a0.d0 - 9 * x.b0.a1.d0, x.b0.a0.d1 - 9 * x.b0.a1.d1, x.b0.a0.d2 - 9 * x.b0.a1.d2 - ), - ); - - let v1 = reduce_3( - UnreducedBigInt3( - x.b1.a0.d0 - 9 * x.b1.a1.d0, x.b1.a0.d1 - 9 * x.b1.a1.d1, x.b1.a0.d2 - 9 * x.b1.a1.d2 - ), - ); - - let v2 = reduce_3( - UnreducedBigInt3( - x.b2.a0.d0 - 9 * x.b2.a1.d0, x.b2.a0.d1 - 9 * x.b2.a1.d1, x.b2.a0.d2 - 9 * x.b2.a1.d2 - ), - ); - - local res: E6full = E6full(v0, v1, v2, x.b0.a1, x.b1.a1, x.b2.a1); - return &res; -} - -func 
v_to_gnark_reduced{range_check_ptr}(x: E6full) -> E6* { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - - let b0a0 = reduce_3( - UnreducedBigInt3(x.v0.d0 + 9 * x.v3.d0, x.v0.d1 + 9 * x.v3.d1, x.v0.d2 + 9 * x.v3.d2) - ); - - let b1a0 = reduce_3( - UnreducedBigInt3(x.v1.d0 + 9 * x.v4.d0, x.v1.d1 + 9 * x.v4.d1, x.v1.d2 + 9 * x.v4.d2) - ); - - let b2a0 = reduce_3( - UnreducedBigInt3(x.v2.d0 + 9 * x.v5.d0, x.v2.d1 + 9 * x.v5.d1, x.v2.d2 + 9 * x.v5.d2) - ); - - local b0: E2 = E2(b0a0, x.v3); - local b1: E2 = E2(b1a0, x.v4); - local b2: E2 = E2(b2a0, x.v5); - - local res: E6 = E6(&b0, &b1, &b2); - return &res; -} - -func eval_E6_plus_v_unreduced{range_check_ptr}( - e6: E6DirectUnreduced, v: felt, powers: ZPowers5* -) -> UnreducedBigInt5 { - alloc_locals; - let e0 = e6.v0; - let v1 = reduce_3(e6.v1); - let v2 = reduce_3(e6.v2); - let v3 = reduce_3(e6.v3); - let v4 = reduce_3(e6.v4); - let v5 = reduce_3(e6.v5); - - let (e1) = bigint_mul(BigInt3(v1.d0 + v, v1.d1, v1.d2), powers.z_1); - let (e2) = bigint_mul(v2, powers.z_2); - let (e3) = bigint_mul(v3, powers.z_3); - let (e4) = bigint_mul(v4, powers.z_4); - let (e5) = bigint_mul(v5, powers.z_5); - - let res = UnreducedBigInt5( - d0=e0.d0 + e1.d0 + e2.d0 + e3.d0 + e4.d0 + e5.d0, - d1=e0.d1 + e1.d1 + e2.d1 + e3.d1 + e4.d1 + e5.d1, - d2=e0.d2 + e1.d2 + e2.d2 + e3.d2 + e4.d2 + e5.d2, - d3=e1.d3 + e2.d3 + e3.d3 + e4.d3 + e5.d3, - d4=e1.d4 + e2.d4 + e3.d4 + e4.d4 + e5.d4, - ); - return res; -} - -func eval_E5{range_check_ptr}(e5: E5full, powers: ZPowers5*) -> BigInt3 { - alloc_locals; - let (v0) = unrededucedUint256_to_BigInt3(e5.v0); - let (v1) = unrededucedUint256_to_BigInt3(e5.v1); - let (v2) = unrededucedUint256_to_BigInt3(e5.v2); - let (v3) = unrededucedUint256_to_BigInt3(e5.v3); - let (v4) = unrededucedUint256_to_BigInt3(e5.v4); - - let e0 = v0; - let (e1) = bigint_mul(v1, powers.z_1); - let (e2) = bigint_mul(v2, powers.z_2); - let (e3) = bigint_mul(v3, powers.z_3); - let (e4) = bigint_mul(v4, powers.z_4); - - let res = reduce_5( - UnreducedBigInt5( - d0=e0.d0 + e1.d0 + e2.d0 + e3.d0 + e4.d0, - d1=e0.d1 + e1.d1 + e2.d1 + e3.d1 + e4.d1, - d2=e0.d2 + e1.d2 + e2.d2 + e3.d2 + e4.d2, - d3=e1.d3 + e2.d3 + e3.d3 + e4.d3, - d4=e1.d4 + e2.d4 + e3.d4 + e4.d4, - ), - ); - return res; -} - -func get_powers_of_z5{range_check_ptr}(z: BigInt3) -> ZPowers5* { - alloc_locals; - let (__fp__, _) = get_fp_and_pc(); - let z_2 = fq_bigint3.mul(z, z); - let z_3 = fq_bigint3.mul(z_2, z); - let z_4 = fq_bigint3.mul(z_3, z); - let z_5 = fq_bigint3.mul(z_4, z); - - local res: ZPowers5 = ZPowers5(z_1=z, z_2=z_2, z_3=z_3, z_4=z_4, z_5=z_5); - return &res; -} - -func eval_irreducible_poly6{range_check_ptr}(z_3: BigInt3, z_6: BigInt3) -> BigInt3 { - alloc_locals; - local v3: BigInt3 = BigInt3( - 60193888514187762220203317, 27625954992973055882053025, 3656382694611191768777988 - ); // -18 % p - let (e3) = bigint_mul(v3, z_3); - - let v6 = z_6; - - let res = reduce_5( - UnreducedBigInt5( - d0=82 + e3.d0 + v6.d0, d1=e3.d1 + v6.d1, d2=e3.d2 + v6.d2, d3=e3.d3, d4=e3.d4 - ), - ); - return res; -} diff --git a/archive_tmp/fp6.cairo b/archive_tmp/fp6.cairo deleted file mode 100644 index f98e4a79..00000000 --- a/archive_tmp/fp6.cairo +++ /dev/null @@ -1,432 +0,0 @@ -from src.bn254.towers.e6 import ( - eval_E6_plus_v_unreduced, - eval_E5, - eval_irreducible_poly6, - ZPowers5, - E5full, - E6DirectUnreduced, - PolyAcc6, -) -from src.bn254.fq import ( - fq_bigint3, - BigInt3, - bigint_mul, - UnreducedBigInt3, - UnreducedBigInt5, - Uint256, - verify_zero5, - felt_to_uint384, -) -from 
starkware.cairo.common.registers import get_fp_and_pc - -from starkware.cairo.common.cairo_builtins import ( - ModBuiltin, - UInt384, - PoseidonBuiltin, - BitwiseBuiltin, -) -from starkware.cairo.common.poseidon_state import PoseidonBuiltinState - -const N_LIMBS = 4; -const BASE = 2 ** 96; -const CURVE = 'bn254'; -const STARK_MIN_ONE_D2 = 576460752303423505; - -struct E6full { - v0: BigInt3, - v1: BigInt3, - v2: BigInt3, - v3: BigInt3, - v4: BigInt3, - v5: BigInt3, -} - -// xy_offset : offset in the range_check_96 ptr. Represents the start of an UInt384 -// R_offset : offset in the range_check_96 ptr. Represents the start of 6 * UInt384 -struct PolyAcc66 { - xy_offset: felt, - R_offset: felt, -} - -// r is known in advance to be 1* v -struct PolyAccSquare6 { - xy: UnreducedBigInt3, - q: E5full, - r: felt, -} - -func verify_6th_extension_tricks{ - range_check_ptr, poly_acc: PolyAcc6*, poly_acc_sq: PolyAccSquare6*, z_pow1_5_ptr: ZPowers5* -}() { - alloc_locals; - let sum_r_of_z = eval_E6_plus_v_unreduced(poly_acc.r, poly_acc_sq.r, z_pow1_5_ptr); - let sum_q_of_z = eval_E5( - E5full( - Uint256( - poly_acc.q.v0.low + poly_acc_sq.q.v0.low, poly_acc.q.v0.high + poly_acc_sq.q.v0.high - ), - Uint256( - poly_acc.q.v1.low + poly_acc_sq.q.v1.low, poly_acc.q.v1.high + poly_acc_sq.q.v1.high - ), - Uint256( - poly_acc.q.v2.low + poly_acc_sq.q.v2.low, poly_acc.q.v2.high + poly_acc_sq.q.v2.high - ), - Uint256( - poly_acc.q.v3.low + poly_acc_sq.q.v3.low, poly_acc.q.v3.high + poly_acc_sq.q.v3.high - ), - Uint256( - poly_acc.q.v4.low + poly_acc_sq.q.v4.low, poly_acc.q.v4.high + poly_acc_sq.q.v4.high - ), - ), - z_pow1_5_ptr, - ); - let z_6 = fq_bigint3.mul(z_pow1_5_ptr.z_1, z_pow1_5_ptr.z_5); - let p_of_z = eval_irreducible_poly6(z_pow1_5_ptr.z_3, z_6); - let (sum_qP_of_z) = bigint_mul(sum_q_of_z, p_of_z); - - verify_zero5( - UnreducedBigInt5( - d0=poly_acc.xy.d0 + poly_acc_sq.xy.d0 - sum_qP_of_z.d0 - sum_r_of_z.d0, - d1=poly_acc.xy.d1 + poly_acc_sq.xy.d1 - sum_qP_of_z.d1 - sum_r_of_z.d1, - d2=poly_acc.xy.d2 + poly_acc_sq.xy.d2 - sum_qP_of_z.d2 - sum_r_of_z.d2, - d3=-sum_qP_of_z.d3 - sum_r_of_z.d3, - d4=-sum_qP_of_z.d4 - sum_r_of_z.d4, - ), - ); - return (); -} - -from starkware.cairo.common.alloc import alloc - -// x_offset: offset in the range_check_96 ptr. x 6*4 limbs are between x_offset and x_offset + 24 -// y_offset: offset in the range_check_96 ptr. 
y 6*4 limbs are between y_offset and y_offset + 24 -func mul_trick_e6{ - range_check96_ptr: felt*, - values_ptr: UInt384*, - n_u384: felt, - poseidon_ptr: PoseidonBuiltin*, - continuable_hash: felt, - poly_acc6: PolyAcc66*, - mul_offsets_ptr: felt*, - add_offsets_ptr: felt*, - mul_mod_n: felt, - add_mod_n: felt, -}(x_offset: felt, y_offset: felt) -> (res_offset: felt) { - alloc_locals; - tempvar two = 2; - tempvar four = 4; - - let r_start: UInt384* = cast(range_check96_ptr, UInt384*); - tempvar res_offset = n_u384 * four; - %{ - from src.hints.e6 import mul_trick - from src.hints.fq import pack_bigint_array, fill_bigint_array - - x = pack_bigint_array(ids.values_ptr, ids.N_LIMBS, ids.BASE, 6, ids.x_offset) - y = pack_bigint_array(ids.values_ptr, ids.N_LIMBS, ids.BASE, 6, ids.y_offset) - q, r = mul_trick(x, y, ids.CURVE) - - fill_bigint_array(r, ids.r_start, ids.N_LIMBS, ids.BASE, 0) - %} - - assert poseidon_ptr.input = PoseidonBuiltinState( - s0=range_check96_ptr[0] * range_check96_ptr[1], s1=continuable_hash, s2=two - ); - assert poseidon_ptr[1].input = PoseidonBuiltinState( - s0=range_check96_ptr[2] * range_check96_ptr[3], s1=poseidon_ptr.output.s0, s2=two - ); - - assert poseidon_ptr[2].input = PoseidonBuiltinState( - s0=range_check96_ptr[4] * range_check96_ptr[5], s1=poseidon_ptr[1].output.s0, s2=two - ); - assert poseidon_ptr[3].input = PoseidonBuiltinState( - s0=range_check96_ptr[6] * range_check96_ptr[7], s1=poseidon_ptr[2].output.s0, s2=two - ); - assert poseidon_ptr[4].input = PoseidonBuiltinState( - s0=range_check96_ptr[8] * range_check96_ptr[9], s1=poseidon_ptr[3].output.s0, s2=two - ); - assert poseidon_ptr[5].input = PoseidonBuiltinState( - s0=range_check96_ptr[10] * range_check96_ptr[11], s1=poseidon_ptr[4].output.s0, s2=two - ); - assert poseidon_ptr[6].input = PoseidonBuiltinState( - s0=range_check96_ptr[12] * range_check96_ptr[13], s1=poseidon_ptr[5].output.s0, s2=two - ); - assert poseidon_ptr[7].input = PoseidonBuiltinState( - s0=range_check96_ptr[14] * range_check96_ptr[15], s1=poseidon_ptr[6].output.s0, s2=two - ); - assert poseidon_ptr[8].input = PoseidonBuiltinState( - s0=range_check96_ptr[16] * range_check96_ptr[17], s1=poseidon_ptr[7].output.s0, s2=two - ); - assert poseidon_ptr[9].input = PoseidonBuiltinState( - s0=range_check96_ptr[18] * range_check96_ptr[19], s1=poseidon_ptr[8].output.s0, s2=two - ); - assert poseidon_ptr[10].input = PoseidonBuiltinState( - s0=range_check96_ptr[20] * range_check96_ptr[21], s1=poseidon_ptr[9].output.s0, s2=two - ); - assert poseidon_ptr[11].input = PoseidonBuiltinState( - s0=range_check96_ptr[22] * range_check96_ptr[23], s1=poseidon_ptr[10].output.s0, s2=two - ); - - // tempvar random_linear_combination_coeff: felt = poseidon_ptr[11].output.s1; - tempvar random_linear_combination_coeff = 1; - local c_d0; - local c_d1; - local c_d2; - %{ - from src.hints.fq import bigint_split - limbs = bigint_split(ids.random_linear_combination_coeff, ids.N_LIMBS, ids.BASE) - assert limbs[3] == 0 - ids.c_d0, ids.c_d1, ids.c_d2 = limbs[0], limbs[1], limbs[2] - %} - - assert random_linear_combination_coeff = c_d0 + c_d1 * 2 ** 96 + c_d2 * (2 ** 96) ** 2; - - if (c_d2 == STARK_MIN_ONE_D2) { - assert c_d0 = 0; - assert c_d1 = 0; - } - assert values_ptr[n_u384 + 6].d0 = c_d0; - assert values_ptr[n_u384 + 6].d1 = c_d1; - assert values_ptr[n_u384 + 6].d2 = c_d2; - assert values_ptr[n_u384 + 6].d3 = 0; - - tempvar c_i_offset = four * (n_u384 + 6); - tempvar x_offset = four * x_offset; - tempvar y_offset = four * y_offset; - tempvar last_offset = 
c_i_offset + four; - - // Compute X(Z) - tempvar zero = 0; // Z - tempvar eight = 8; // Z^3 - tempvar twelve = 12; // Z^4 - tempvar sixt = 16; // Z^5 - - // x1*Z - assert mul_offsets_ptr[0] = x_offset + four; - assert mul_offsets_ptr[1] = zero; - assert mul_offsets_ptr[2] = last_offset; - // x2*Z^2 - tempvar x2z2 = last_offset + four; - assert mul_offsets_ptr[3] = x_offset + 8; - assert mul_offsets_ptr[4] = four; - assert mul_offsets_ptr[5] = x2z2; - // x3*Z^3 - tempvar x3z3 = last_offset + 8; - assert mul_offsets_ptr[6] = x_offset + 12; - assert mul_offsets_ptr[7] = eight; - assert mul_offsets_ptr[8] = x3z3; - // x4*Z^4 - tempvar x4z4 = last_offset + 12; - assert mul_offsets_ptr[9] = x_offset + 16; - assert mul_offsets_ptr[10] = twelve; - assert mul_offsets_ptr[11] = x4z4; - // x5*Z^5 - tempvar x5z5 = last_offset + 16; - assert mul_offsets_ptr[12] = x_offset + 20; - assert mul_offsets_ptr[13] = sixt; - assert mul_offsets_ptr[14] = x5z5; - - // x0 + x1*Z - tempvar x0x1z = last_offset + 20; - assert add_offsets_ptr[0] = x_offset; - assert add_offsets_ptr[1] = last_offset; - assert add_offsets_ptr[2] = x0x1z; - // ( x0 + x1*Z) + x2*Z^2 - tempvar x0x1zx2z2 = last_offset + 24; - assert add_offsets_ptr[3] = x0x1z; - assert add_offsets_ptr[4] = x2z2; - assert add_offsets_ptr[5] = x0x1zx2z2; - // (x0 + x1*Z + x2*Z^2) + x3*Z^3 - tempvar x0x1zx2z2x3z3 = last_offset + 28; - assert add_offsets_ptr[6] = x0x1zx2z2; - assert add_offsets_ptr[7] = x3z3; - assert add_offsets_ptr[8] = x0x1zx2z2x3z3; - // (x0 + x1*Z + x2*Z^2 + x3*Z^3) + x4*Z^4 - tempvar x01234z1234 = last_offset + 32; - assert add_offsets_ptr[9] = x0x1zx2z2x3z3; - assert add_offsets_ptr[10] = x4z4; - assert add_offsets_ptr[11] = x01234z1234; - // (x0 + x1*Z + x2*Z^2 + x3*Z^3 + x4*Z^4) + x5*Z^5 - tempvar x_of_z = x01234z1234 + four; - assert add_offsets_ptr[12] = x01234z1234; - assert add_offsets_ptr[13] = x5z5; - assert add_offsets_ptr[14] = x_of_z; - - // Compute Y(Z) - // y1*Z - tempvar y1z = last_offset + 40; - assert mul_offsets_ptr[15] = y_offset + four; - assert mul_offsets_ptr[16] = zero; - assert mul_offsets_ptr[17] = y1z; - // y2*Z^2 - tempvar y2z2 = last_offset + 44; - assert mul_offsets_ptr[18] = y_offset + 8; - assert mul_offsets_ptr[19] = four; - assert mul_offsets_ptr[20] = y2z2; - // y3*Z^3 - tempvar y3z3 = last_offset + 48; - assert mul_offsets_ptr[21] = y_offset + 12; - assert mul_offsets_ptr[22] = eight; - assert mul_offsets_ptr[23] = y3z3; - // y4*Z^4 - tempvar y4z4 = last_offset + 52; - assert mul_offsets_ptr[24] = y_offset + 16; - assert mul_offsets_ptr[25] = twelve; - assert mul_offsets_ptr[26] = y4z4; - // y5*Z^5 - tempvar y5z5 = last_offset + 56; - assert mul_offsets_ptr[27] = y_offset + 20; - assert mul_offsets_ptr[28] = sixt; - assert mul_offsets_ptr[29] = y5z5; - - // y0 + y1*Z - tempvar y01z1 = last_offset + 60; - assert add_offsets_ptr[15] = y_offset; - assert add_offsets_ptr[16] = y1z; - assert add_offsets_ptr[17] = y01z1; - // (y0 + y1*Z) + y2*Z^2 - tempvar y012z12 = last_offset + 64; - assert add_offsets_ptr[18] = y01z1; - assert add_offsets_ptr[19] = y2z2; - assert add_offsets_ptr[20] = y012z12; - // (y0 + y1*Z + y2*Z^2) + y3*Z^3 - tempvar y0123z123 = last_offset + 68; - assert add_offsets_ptr[21] = y012z12; - assert add_offsets_ptr[22] = y3z3; - assert add_offsets_ptr[23] = y0123z123; - // (y0 + y1*Z + y2*Z^2 + y3*Z^3) + y4*Z^4 - tempvar y01234z1234 = last_offset + 72; - assert add_offsets_ptr[24] = y0123z123; - assert add_offsets_ptr[25] = y4z4; - assert add_offsets_ptr[26] = y01234z1234; - // (y0 + y1*Z + 
y2*Z^2 + y3*Z^3 + y4*Z^4) + y5*Z^5 - tempvar y_of_z = last_offset + 76; - assert add_offsets_ptr[27] = y01234z1234; - assert add_offsets_ptr[28] = y5z5; - assert add_offsets_ptr[29] = y_of_z; - - // Compute X(Z) * Y(Z) - tempvar xy_of_z = last_offset + 80; - assert mul_offsets_ptr[30] = x_of_z; - assert mul_offsets_ptr[31] = y_of_z; - assert mul_offsets_ptr[32] = xy_of_z; - - // Compute (X(Z) * Y(Z)) * c_i - tempvar ci_xy_of_z = last_offset + 84; - assert mul_offsets_ptr[33] = xy_of_z; - assert mul_offsets_ptr[34] = c_i_offset; - assert mul_offsets_ptr[35] = ci_xy_of_z; - - // Compute (X(Z) * Y(Z)) * c_i + PolyAcc6.xy - assert add_offsets_ptr[30] = ci_xy_of_z; - assert add_offsets_ptr[31] = poly_acc6.xy_offset; - assert add_offsets_ptr[32] = last_offset + 88; - - // Compute c_i * R + PolyAcc6.R - - // Compute c_i * r0 - tempvar ci_r0 = last_offset + 92; - assert mul_offsets_ptr[36] = c_i_offset; - assert mul_offsets_ptr[37] = res_offset; - assert mul_offsets_ptr[38] = ci_r0; - // Compute c_i * r1 - tempvar ci_r1 = last_offset + 96; - assert mul_offsets_ptr[39] = c_i_offset; - assert mul_offsets_ptr[40] = res_offset + 4; - assert mul_offsets_ptr[41] = ci_r1; - // Compute c_i * r2 - tempvar ci_r2 = last_offset + 100; - assert mul_offsets_ptr[42] = c_i_offset; - assert mul_offsets_ptr[43] = res_offset + 8; - assert mul_offsets_ptr[44] = ci_r2; - // Compute c_i * r3 - tempvar ci_r3 = last_offset + 104; - assert mul_offsets_ptr[45] = c_i_offset; - assert mul_offsets_ptr[46] = res_offset + 12; - assert mul_offsets_ptr[47] = ci_r3; - // Compute c_i * r4 - tempvar ci_r4 = last_offset + 108; - assert mul_offsets_ptr[48] = c_i_offset; - assert mul_offsets_ptr[49] = res_offset + 16; - assert mul_offsets_ptr[50] = ci_r4; - // Compute c_i * r5 - tempvar ci_r5 = last_offset + 112; - assert mul_offsets_ptr[51] = c_i_offset; - assert mul_offsets_ptr[52] = res_offset + 20; - assert mul_offsets_ptr[53] = ci_r5; - - // (c_i * r0) + PolyAcc6.R.r0 - assert add_offsets_ptr[33] = ci_r0; - assert add_offsets_ptr[34] = poly_acc6.R_offset; - assert add_offsets_ptr[35] = last_offset + 116; - // (c_i * r1) + PolyAcc6.R.r1 - assert add_offsets_ptr[36] = ci_r1; - assert add_offsets_ptr[37] = poly_acc6.R_offset + four; - assert add_offsets_ptr[38] = last_offset + 120; - // (c_i * r2) + PolyAcc6.R.r2 - assert add_offsets_ptr[39] = ci_r2; - assert add_offsets_ptr[40] = poly_acc6.R_offset + 8; - assert add_offsets_ptr[41] = last_offset + 124; - // (c_i * r3) + PolyAcc6.R.r3 - assert add_offsets_ptr[42] = ci_r3; - assert add_offsets_ptr[43] = poly_acc6.R_offset + 12; - assert add_offsets_ptr[44] = last_offset + 128; - // (c_i * r4) + PolyAcc6.R.r4 - assert add_offsets_ptr[45] = ci_r4; - assert add_offsets_ptr[46] = poly_acc6.R_offset + 16; - assert add_offsets_ptr[47] = last_offset + 132; - // (c_i * r5) + PolyAcc6.R.r5 - assert add_offsets_ptr[48] = ci_r5; - assert add_offsets_ptr[49] = poly_acc6.R_offset + 20; - assert add_offsets_ptr[50] = last_offset + 136; - - assert [range_check96_ptr + 6 * UInt384.SIZE + 36 * UInt384.SIZE] = STARK_MIN_ONE_D2 - c_d2; - tempvar range_check96_ptr = range_check96_ptr + 6 * UInt384.SIZE + 36 * UInt384.SIZE + 1; - // tempvar poseidon_ptr = poseidon_ptr + 12 * PoseidonBuiltin.SIZE; // 12 Poseidon - // let continuable_hash = [poseidon_ptr - PoseidonBuiltin.SIZE].output.s0; - let continuable_hash = 0; - - tempvar poly_acc6 = new PolyAcc66(xy_offset=last_offset + 88, R_offset=last_offset + 112); - tempvar mul_offsets_ptr = mul_offsets_ptr + 18 * 3; // 18 MULs - tempvar add_offsets_ptr = 
add_offsets_ptr + 17 * 3; // 17 ADDs - tempvar mul_mod_n = mul_mod_n + 18; - tempvar add_mod_n = add_mod_n + 17; - - return (res_offset,); -} - -// Copies len field elements from src to dst. -func memcpy(dst: felt*, src: felt*, len) { - struct LoopFrame { - dst: felt*, - src: felt*, - } - - if (len == 0) { - return (); - } - - %{ vm_enter_scope({'n': ids.len}) %} - tempvar frame = LoopFrame(dst=dst, src=src); - - loop: - let frame = [cast(ap - LoopFrame.SIZE, LoopFrame*)]; - assert [frame.dst] = [frame.src]; - - let continue_copying = [ap]; - // Reserve space for continue_copying. - let next_frame = cast(ap + 1, LoopFrame*); - next_frame.dst = frame.dst + 1, ap++; - next_frame.src = frame.src + 1, ap++; - %{ - n -= 1 - ids.continue_copying = 1 if n > 0 else 0 - %} - static_assert next_frame + LoopFrame.SIZE == ap + 1; - jmp loop if continue_copying != 0, ap++; - // Assert that the loop executed len times. - len = cast(next_frame.src, felt) - cast(src, felt); - - %{ vm_exit_scope() %} - return (); -} diff --git a/src/precompiled_circuits/bn254_ec.py b/src/precompiled_circuits/bn254_ec.py deleted file mode 100644 index e69de29b..00000000 diff --git a/src/precompiled_circuits/bls12_381_ec.py b/src/precompiled_circuits/ec.py similarity index 100% rename from src/precompiled_circuits/bls12_381_ec.py rename to src/precompiled_circuits/ec.py diff --git a/tests/python_tests/bigint_poly_eval.py b/tests/python_tests/bigint_poly_eval.py deleted file mode 100644 index f4f9f9cd..00000000 --- a/tests/python_tests/bigint_poly_eval.py +++ /dev/null @@ -1,22 +0,0 @@ -import random - -p = 21888242871839275222246405745257275088696311157297823662689037894645226208583 - - -def split_128(a): - """Takes in value, returns uint256-ish tuple.""" - return (a & ((1 << 128) - 1), a >> 128) - - -a, b = [random.randint(0, p - 1) for _ in range(2)] -a_s, b_s, p_s = split_128(a), split_128(b), split_128(p) - -q, r = divmod(a * b, p) -qs, rs = split_128(q), split_128(r) - -assert a * b == q * p + r - -# Evaluate in x=0 -assert ( - a_s[0] * b_s[0] % 2**128 - qs[0] * p_s[0] % 2**128 - rs[0] == 0 -), f"{a_s[0] * b_s[0] - qs[0] * p_s[0] - rs[0]}" diff --git a/tests/python_tests/casting_out_primes.py b/tests/python_tests/casting_out_primes.py deleted file mode 100644 index d9fd7acf..00000000 --- a/tests/python_tests/casting_out_primes.py +++ /dev/null @@ -1,156 +0,0 @@ -import random -from typing import Tuple, List -from math import lcm -from sympy import gcd -random.seed(42) - -STARKFp = 3618502788666131213697322783095070105623107215331596699973092056135872020481 -BN254Fp = 21888242871839275222246405745257275088696311157297823662689037894645226208583 -p = STARKFp -q = BN254Fp - -n = 3 # number of limbs -n_pi = 2*n - 1 # number of limbs in polynomial product -b = 2**86 # base - -def get_felt(p:int) -> int: - return random.randint(0, p-1) - -def split_fq(x:int) -> List[int]: - assert x >= 0, "Error: x must be positive" - coeffs, degree = [], n-1 - for i in range(degree, 0, -1): - q, r = divmod(x, b ** i) - coeffs.append(q) - x = r - coeffs.append(x) - return coeffs[::-1] - -# evaluate x(b) -def sigma_b(x:list) -> int: - result = 0 - for i in range(len(x)): - assert x[i] < b, f"Error: wrong bounds {x[i]} >= {b}" - result += b**i * x[i] - assert 0 <= result < b**(len(x)) - 1, f"Error: wrong bounds {result} >= {b**(len(x)) - 1}" - return result - -# multiply x(b) and y(b), returns limbs and x(b)*y(b) -def pi_b(x:list, y:list) -> Tuple[List[int], int]: - assert len(x) == len(y) == n, "Error: pi_b() requires two lists of length n" - 
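    # Schoolbook multiplication of the two limb vectors viewed as degree-(n-1)
    # polynomials in the base b: limbs[i+j] accumulates x[i]*y[j] (giving the
    # 2n-1 output limbs) and result re-evaluates the product at b, so that
    # sigma_b(limbs) == sigma_b(x) * sigma_b(y).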
limbs = n_pi*[0] - result = 0 - for i in range(n): - for j in range(n): - limbs[i+j] += x[i]*y[j] - result += x[i]*y[j] * b**(i+j) - assert 0 <= result < b**(2*n) - 1, f"Error: wrong bounds {result} >= {b**(2*n) - 1}" - return limbs, result - -# evaluate x(b) mod m -def sigma_b_mod_m(x:list, m:int) -> int: - result = 0 - assert len(x) == n_pi or len(x) == n, "Error: sigma_b_mod_m() requires a list of length n or n_pi" - for i in range(len(x)): - result += (b**i % m) * x[i] - assert result == sigma_b(x) % m, "Error: sigma_b_mod_m() is not working" - assert 0 <= result < n*m*b, "Error: wrong bounds" - return result - -# multiply x(b) and y(b) mod m, returns limbs and x(b)*y(b) mod m -def pi_b_mod_m(x:list, y:list, m:int) -> Tuple[List[int], int]: - assert len(x) == len(y) == n, "Error: pi_b() requires two lists of length n" - limbs = n_pi*[0] - result = 0 - for i in range(n): - for j in range(n): - limbs[i+j] += x[i]*y[j] - result += x[i]*y[j] * (b**(i+j)%m) - assert 0 <= result < n**2*m*b**2 - return limbs, result - -def sigma_b_mod_q_mod_m(x, m): - result = 0 - for i in range(len(x)): - result += ((b**i % q) % m) * x[i] - assert result % m == (sigma_b(x) % q) % m, f"Error : {result} != {sigma_b_mod_m(x, q) % m}" - return result - -def pi_b_mod_q_mod_m(x, y, m): - assert len(x) == len(y) == n, "Error: pi_b() requires two lists of length n" - limbs = n_pi*[0] - result = 0 - for i in range(n): - for j in range(n): - limbs[i+j] += x[i]*y[j] - result += x[i]*y[j] * ((b**(i+j)%q)%m) - assert 0 <= result < n**2*m*b**2 - return limbs, result - - -def get_m_bound() -> int: - return p//(4*(n**2)*(b**2)) -def get_lcm_bound() -> int: - return 2*(n**2)*q*(b**2) - -def generate_coprime_set(m_bound, lcm_bound) -> List[int]: - M=[p] - # Check if a candidate number is coprime to all elements in M - def is_coprime_to_all(candidate, M): - return all(gcd(candidate, m) == 1 for m in M) - candidate = m_bound - while candidate >= 2: - if is_coprime_to_all(candidate, M): - M.append(candidate) - if lcm(*M) >= lcm_bound: - break - candidate -= 1 - return M -def generate_consts(M): - consts = {} - for i in range(0, len(M)): - m = M[i] - d = {"m": m, "q_mod_m": q % m} - for j in range(1, n_pi): - d[f"bpow{j}modqmodm{i}"] = (b**j % q) % m - consts[f"M{i}"] = d - return consts - - - -def get_witness_z_r_s(x:list, y:list, M:list): - assert len(x) == len(y) == n, "Error: get_witness_z_and_q() requires two lists of length n" - z:list = split_fq(sigma_b(x) * sigma_b(y) % q) - pi:int = pi_b_mod_m(x, y, q)[1] - val_limbs = pi_b_mod_m(x, y, q)[0] - sigma:int = sigma_b_mod_m(z, q) - r_q = pi - sigma - assert r_q % q == 0, "Error: r_q is not divisible by q" - r = r_q // q - S = [] - for i in range(len(M)): - m = M[i] - pi:int = pi_b_mod_q_mod_m(x, y, m)[1] - sigma:int = sigma_b_mod_q_mod_m(z, m) - s_m = pi - sigma - r*(q%m) - assert s_m % m == 0, "Error: s_m is not divisible by m" - s = s_m // m - S.append(s) - return val_limbs, z, r, S - -m_bound = get_m_bound() -lcm_bound = get_lcm_bound() -M = generate_coprime_set(m_bound, lcm_bound) -consts = generate_consts(M) -print("Coprime set M:", set(M)) - -x_o,y_o = get_felt(q), get_felt(q) -x,y = split_fq(x_o), split_fq(y_o) -max = split_fq(q-1) - -assert sigma_b(x) == x_o, "Error: sigma_b() is not working" -assert sigma_b(y) == y_o, "Error: sigma_b() is not working" -assert pi_b(max, max)[1] == (q-1)**2, "Error: pi_b() is not working" -assert sigma_b_mod_m(max, q) == q-1, "Error: sigma_b_mod_m() is not working" - -val, z, r, S = get_witness_z_r_s(x, y, M) diff --git 
a/tools/gnark/bls12_381/fr/fft/doc.go b/tools/gnark/bls12_381/fr/fft/doc.go deleted file mode 100644 index 3c35170e..00000000 --- a/tools/gnark/bls12_381/fr/fft/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2020 ConsenSys Software Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by consensys/gnark-crypto DO NOT EDIT - -// Package fft provides in-place discrete Fourier transform. -package fft diff --git a/tools/gnark/bls12_381/fr/fft/domain.go b/tools/gnark/bls12_381/fr/fft/domain.go deleted file mode 100644 index a338e273..00000000 --- a/tools/gnark/bls12_381/fr/fft/domain.go +++ /dev/null @@ -1,240 +0,0 @@ -// Copyright 2020 ConsenSys Software Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by consensys/gnark-crypto DO NOT EDIT - -package fft - -import ( - "fmt" - "io" - "math/big" - "math/bits" - "runtime" - "sync" - - "github.com/consensys/gnark-crypto/ecc/bls12-381/fr" - - curve "github.com/consensys/gnark-crypto/ecc/bls12-381" - - "github.com/consensys/gnark-crypto/ecc" -) - -// Domain with a power of 2 cardinality -// compute a field element of order 2x and store it in FinerGenerator -// all other values can be derived from x, GeneratorSqrt -type Domain struct { - Cardinality uint64 - CardinalityInv fr.Element - Generator fr.Element - GeneratorInv fr.Element - FrMultiplicativeGen fr.Element // generator of Fr* - FrMultiplicativeGenInv fr.Element - - // the following slices are not serialized and are (re)computed through domain.preComputeTwiddles() - - // Twiddles factor for the FFT using Generator for each stage of the recursive FFT - Twiddles [][]fr.Element - - // Twiddles factor for the FFT using GeneratorInv for each stage of the recursive FFT - TwiddlesInv [][]fr.Element - - // we precompute these mostly to avoid the memory intensive bit reverse permutation in the groth16.Prover - - // CosetTable u*<1,g,..,g^(n-1)> - CosetTable []fr.Element - CosetTableReversed []fr.Element // optional, this is computed on demand at the creation of the domain - - // CosetTable[i][j] = domain.Generator(i-th)SqrtInv ^ j - CosetTableInv []fr.Element - CosetTableInvReversed []fr.Element // optional, this is computed on demand at the creation of the domain -} - -// NewDomain returns a subgroup with a power of 2 cardinality -// cardinality >= m -func NewDomain(m uint64) *Domain { - - domain := &Domain{} - x := ecc.NextPowerOfTwo(m) - domain.Cardinality = uint64(x) - - // generator of the largest 2-adic subgroup - var rootOfUnity 
fr.Element - - rootOfUnity.SetString("10238227357739495823651030575849232062558860180284477541189508159991286009131") - const maxOrderRoot uint64 = 32 - domain.FrMultiplicativeGen.SetUint64(7) - - domain.FrMultiplicativeGenInv.Inverse(&domain.FrMultiplicativeGen) - - // find generator for Z/2^(log(m))Z - logx := uint64(bits.TrailingZeros64(x)) - if logx > maxOrderRoot { - panic(fmt.Sprintf("m (%d) is too big: the required root of unity does not exist", m)) - } - - // Generator = FinerGenerator^2 has order x - expo := uint64(1 << (maxOrderRoot - logx)) - domain.Generator.Exp(rootOfUnity, big.NewInt(int64(expo))) // order x - domain.GeneratorInv.Inverse(&domain.Generator) - domain.CardinalityInv.SetUint64(uint64(x)).Inverse(&domain.CardinalityInv) - - // twiddle factors - domain.preComputeTwiddles() - - // store the bit reversed coset tables - domain.reverseCosetTables() - - return domain -} - -func (d *Domain) reverseCosetTables() { - d.CosetTableReversed = make([]fr.Element, d.Cardinality) - d.CosetTableInvReversed = make([]fr.Element, d.Cardinality) - copy(d.CosetTableReversed, d.CosetTable) - copy(d.CosetTableInvReversed, d.CosetTableInv) - BitReverse(d.CosetTableReversed) - BitReverse(d.CosetTableInvReversed) -} - -func (d *Domain) preComputeTwiddles() { - - // nb fft stages - nbStages := uint64(bits.TrailingZeros64(d.Cardinality)) - - d.Twiddles = make([][]fr.Element, nbStages) - d.TwiddlesInv = make([][]fr.Element, nbStages) - d.CosetTable = make([]fr.Element, d.Cardinality) - d.CosetTableInv = make([]fr.Element, d.Cardinality) - - var wg sync.WaitGroup - - // for each fft stage, we pre compute the twiddle factors - twiddles := func(t [][]fr.Element, omega fr.Element) { - for i := uint64(0); i < nbStages; i++ { - t[i] = make([]fr.Element, 1+(1<<(nbStages-i-1))) - var w fr.Element - if i == 0 { - w = omega - } else { - w = t[i-1][2] - } - t[i][0] = fr.One() - t[i][1] = w - for j := 2; j < len(t[i]); j++ { - t[i][j].Mul(&t[i][j-1], &w) - } - } - wg.Done() - } - - expTable := func(sqrt fr.Element, t []fr.Element) { - t[0] = fr.One() - precomputeExpTable(sqrt, t) - wg.Done() - } - - wg.Add(4) - go twiddles(d.Twiddles, d.Generator) - go twiddles(d.TwiddlesInv, d.GeneratorInv) - go expTable(d.FrMultiplicativeGen, d.CosetTable) - go expTable(d.FrMultiplicativeGenInv, d.CosetTableInv) - - wg.Wait() - -} - -func precomputeExpTable(w fr.Element, table []fr.Element) { - n := len(table) - - // see if it makes sense to parallelize exp tables pre-computation - interval := 0 - if runtime.NumCPU() >= 4 { - interval = (n - 1) / (runtime.NumCPU() / 4) - } - - // this ratio roughly correspond to the number of multiplication one can do in place of a Exp operation - const ratioExpMul = 6000 / 17 - - if interval < ratioExpMul { - precomputeExpTableChunk(w, 1, table[1:]) - return - } - - // we parallelize - var wg sync.WaitGroup - for i := 1; i < n; i += interval { - start := i - end := i + interval - if end > n { - end = n - } - wg.Add(1) - go func() { - precomputeExpTableChunk(w, uint64(start), table[start:end]) - wg.Done() - }() - } - wg.Wait() -} - -func precomputeExpTableChunk(w fr.Element, power uint64, table []fr.Element) { - - // this condition ensures that creating a domain of size 1 with cosets don't fail - if len(table) > 0 { - table[0].Exp(w, new(big.Int).SetUint64(power)) - for i := 1; i < len(table); i++ { - table[i].Mul(&table[i-1], &w) - } - } -} - -// WriteTo writes a binary representation of the domain (without the precomputed twiddle factors) -// to the provided writer -func (d *Domain) 
WriteTo(w io.Writer) (int64, error) { - - enc := curve.NewEncoder(w) - - toEncode := []interface{}{d.Cardinality, &d.CardinalityInv, &d.Generator, &d.GeneratorInv, &d.FrMultiplicativeGen, &d.FrMultiplicativeGenInv} - - for _, v := range toEncode { - if err := enc.Encode(v); err != nil { - return enc.BytesWritten(), err - } - } - - return enc.BytesWritten(), nil -} - -// ReadFrom attempts to decode a domain from Reader -func (d *Domain) ReadFrom(r io.Reader) (int64, error) { - - dec := curve.NewDecoder(r) - - toDecode := []interface{}{&d.Cardinality, &d.CardinalityInv, &d.Generator, &d.GeneratorInv, &d.FrMultiplicativeGen, &d.FrMultiplicativeGenInv} - - for _, v := range toDecode { - if err := dec.Decode(v); err != nil { - return dec.BytesRead(), err - } - } - - // twiddle factors - d.preComputeTwiddles() - - // store the bit reversed coset tables if needed - d.reverseCosetTables() - - return dec.BytesRead(), nil -} diff --git a/tools/gnark/bls12_381/fr/fft/domain_test.go b/tools/gnark/bls12_381/fr/fft/domain_test.go deleted file mode 100644 index 14d23dd9..00000000 --- a/tools/gnark/bls12_381/fr/fft/domain_test.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright 2020 ConsenSys Software Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by consensys/gnark-crypto DO NOT EDIT - -package fft - -import ( - "bytes" - "reflect" - "testing" -) - -func TestDomainSerialization(t *testing.T) { - - domain := NewDomain(1 << 6) - var reconstructed Domain - - var buf bytes.Buffer - written, err := domain.WriteTo(&buf) - if err != nil { - t.Fatal(err) - } - var read int64 - read, err = reconstructed.ReadFrom(&buf) - if err != nil { - t.Fatal(err) - } - - if written != read { - t.Fatal("didn't read as many bytes as we wrote") - } - if !reflect.DeepEqual(domain, &reconstructed) { - t.Fatal("Domain.SetBytes(Bytes()) failed") - } -} diff --git a/tools/gnark/bls12_381/fr/fft/fft.go b/tools/gnark/bls12_381/fr/fft/fft.go deleted file mode 100644 index 476b85a3..00000000 --- a/tools/gnark/bls12_381/fr/fft/fft.go +++ /dev/null @@ -1,299 +0,0 @@ -// Copyright 2020 ConsenSys Software Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
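The Domain API removed above (NewDomain, WriteTo, ReadFrom) is exercised end to end by the TestDomainSerialization test that is also deleted in this change. As a minimal sketch of the same round trip, assuming the upstream github.com/consensys/gnark-crypto/ecc/bls12-381/fr/fft package (from which this vendored copy was generated) is used in place of the deleted files:

package main

import (
	"bytes"
	"fmt"

	"github.com/consensys/gnark-crypto/ecc/bls12-381/fr/fft"
)

func main() {
	// NewDomain rounds the requested size up to the next power of two
	// (here 64) and precomputes the twiddle and coset tables.
	domain := fft.NewDomain(50)
	fmt.Println("cardinality:", domain.Cardinality)

	// WriteTo serialises only the generators and the cardinality; the
	// twiddle and coset tables are recomputed by ReadFrom.
	var buf bytes.Buffer
	if _, err := domain.WriteTo(&buf); err != nil {
		panic(err)
	}
	var back fft.Domain
	if _, err := back.ReadFrom(&buf); err != nil {
		panic(err)
	}
	fmt.Println("generator preserved:", back.Generator.Equal(&domain.Generator))
}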
- -// Code generated by consensys/gnark-crypto DO NOT EDIT - -package fft - -import ( - "math/bits" - "runtime" - - "github.com/consensys/gnark-crypto/ecc" - "github.com/consensys/gnark-crypto/internal/parallel" - - "github.com/consensys/gnark-crypto/ecc/bls12-381/fr" -) - -// Decimation is used in the FFT call to select decimation in time or in frequency -type Decimation uint8 - -const ( - DIT Decimation = iota - DIF -) - -// parallelize threshold for a single butterfly op, if the fft stage is not parallelized already -const butterflyThreshold = 16 - -// FFT computes (recursively) the discrete Fourier transform of a and stores the result in a -// if decimation == DIT (decimation in time), the input must be in bit-reversed order -// if decimation == DIF (decimation in frequency), the output will be in bit-reversed order -// if coset if set, the FFT(a) returns the evaluation of a on a coset. -func (domain *Domain) FFT(a []fr.Element, decimation Decimation, coset ...bool) { - - numCPU := uint64(runtime.NumCPU()) - - _coset := false - if len(coset) > 0 { - _coset = coset[0] - } - - // if coset != 0, scale by coset table - if _coset { - scale := func(cosetTable []fr.Element) { - parallel.Execute(len(a), func(start, end int) { - for i := start; i < end; i++ { - a[i].Mul(&a[i], &cosetTable[i]) - } - }) - } - if decimation == DIT { - scale(domain.CosetTableReversed) - - } else { - scale(domain.CosetTable) - } - } - - // find the stage where we should stop spawning go routines in our recursive calls - // (ie when we have as many go routines running as we have available CPUs) - maxSplits := bits.TrailingZeros64(ecc.NextPowerOfTwo(numCPU)) - if numCPU <= 1 { - maxSplits = -1 - } - - switch decimation { - case DIF: - difFFT(a, domain.Twiddles, 0, maxSplits, nil) - case DIT: - ditFFT(a, domain.Twiddles, 0, maxSplits, nil) - default: - panic("not implemented") - } -} - -// FFTInverse computes (recursively) the inverse discrete Fourier transform of a and stores the result in a -// if decimation == DIT (decimation in time), the input must be in bit-reversed order -// if decimation == DIF (decimation in frequency), the output will be in bit-reversed order -// coset sets the shift of the fft (0 = no shift, standard fft) -// len(a) must be a power of 2, and w must be a len(a)th root of unity in field F. -func (domain *Domain) FFTInverse(a []fr.Element, decimation Decimation, coset ...bool) { - - numCPU := uint64(runtime.NumCPU()) - - _coset := false - if len(coset) > 0 { - _coset = coset[0] - } - - // find the stage where we should stop spawning go routines in our recursive calls - // (ie when we have as many go routines running as we have available CPUs) - maxSplits := bits.TrailingZeros64(ecc.NextPowerOfTwo(numCPU)) - if numCPU <= 1 { - maxSplits = -1 - } - switch decimation { - case DIF: - difFFT(a, domain.TwiddlesInv, 0, maxSplits, nil) - case DIT: - ditFFT(a, domain.TwiddlesInv, 0, maxSplits, nil) - default: - panic("not implemented") - } - - // scale by CardinalityInv - if !_coset { - parallel.Execute(len(a), func(start, end int) { - for i := start; i < end; i++ { - a[i].Mul(&a[i], &domain.CardinalityInv) - } - }) - return - } - - scale := func(cosetTable []fr.Element) { - parallel.Execute(len(a), func(start, end int) { - for i := start; i < end; i++ { - a[i].Mul(&a[i], &cosetTable[i]). 
- Mul(&a[i], &domain.CardinalityInv) - } - }) - } - if decimation == DIT { - scale(domain.CosetTableInv) - return - } - - // decimation == DIF - scale(domain.CosetTableInvReversed) - -} - -func difFFT(a []fr.Element, twiddles [][]fr.Element, stage, maxSplits int, chDone chan struct{}) { - if chDone != nil { - defer close(chDone) - } - - n := len(a) - if n == 1 { - return - } else if n == 8 { - kerDIF8(a, twiddles, stage) - return - } - m := n >> 1 - - // if stage < maxSplits, we parallelize this butterfly - // but we have only numCPU / stage cpus available - if (m > butterflyThreshold) && (stage < maxSplits) { - // 1 << stage == estimated used CPUs - numCPU := runtime.NumCPU() / (1 << (stage)) - parallel.Execute(m, func(start, end int) { - for i := start; i < end; i++ { - fr.Butterfly(&a[i], &a[i+m]) - a[i+m].Mul(&a[i+m], &twiddles[stage][i]) - } - }, numCPU) - } else { - // i == 0 - fr.Butterfly(&a[0], &a[m]) - for i := 1; i < m; i++ { - fr.Butterfly(&a[i], &a[i+m]) - a[i+m].Mul(&a[i+m], &twiddles[stage][i]) - } - } - - if m == 1 { - return - } - - nextStage := stage + 1 - if stage < maxSplits { - chDone := make(chan struct{}, 1) - go difFFT(a[m:n], twiddles, nextStage, maxSplits, chDone) - difFFT(a[0:m], twiddles, nextStage, maxSplits, nil) - <-chDone - } else { - difFFT(a[0:m], twiddles, nextStage, maxSplits, nil) - difFFT(a[m:n], twiddles, nextStage, maxSplits, nil) - } - -} - -func ditFFT(a []fr.Element, twiddles [][]fr.Element, stage, maxSplits int, chDone chan struct{}) { - if chDone != nil { - defer close(chDone) - } - n := len(a) - if n == 1 { - return - } else if n == 8 { - kerDIT8(a, twiddles, stage) - return - } - m := n >> 1 - - nextStage := stage + 1 - - if stage < maxSplits { - // that's the only time we fire go routines - chDone := make(chan struct{}, 1) - go ditFFT(a[m:], twiddles, nextStage, maxSplits, chDone) - ditFFT(a[0:m], twiddles, nextStage, maxSplits, nil) - <-chDone - } else { - ditFFT(a[0:m], twiddles, nextStage, maxSplits, nil) - ditFFT(a[m:n], twiddles, nextStage, maxSplits, nil) - - } - - // if stage < maxSplits, we parallelize this butterfly - // but we have only numCPU / stage cpus available - if (m > butterflyThreshold) && (stage < maxSplits) { - // 1 << stage == estimated used CPUs - numCPU := runtime.NumCPU() / (1 << (stage)) - parallel.Execute(m, func(start, end int) { - for k := start; k < end; k++ { - a[k+m].Mul(&a[k+m], &twiddles[stage][k]) - fr.Butterfly(&a[k], &a[k+m]) - } - }, numCPU) - - } else { - fr.Butterfly(&a[0], &a[m]) - for k := 1; k < m; k++ { - a[k+m].Mul(&a[k+m], &twiddles[stage][k]) - fr.Butterfly(&a[k], &a[k+m]) - } - } -} - -// BitReverse applies the bit-reversal permutation to a. 
-// len(a) must be a power of 2 (as in every single function in this file) -func BitReverse(a []fr.Element) { - n := uint64(len(a)) - nn := uint64(64 - bits.TrailingZeros64(n)) - - for i := uint64(0); i < n; i++ { - irev := bits.Reverse64(i) >> nn - if irev > i { - a[i], a[irev] = a[irev], a[i] - } - } -} - -// kerDIT8 is a kernel that process a FFT of size 8 -func kerDIT8(a []fr.Element, twiddles [][]fr.Element, stage int) { - - fr.Butterfly(&a[0], &a[1]) - fr.Butterfly(&a[2], &a[3]) - fr.Butterfly(&a[4], &a[5]) - fr.Butterfly(&a[6], &a[7]) - fr.Butterfly(&a[0], &a[2]) - a[3].Mul(&a[3], &twiddles[stage+1][1]) - fr.Butterfly(&a[1], &a[3]) - fr.Butterfly(&a[4], &a[6]) - a[7].Mul(&a[7], &twiddles[stage+1][1]) - fr.Butterfly(&a[5], &a[7]) - fr.Butterfly(&a[0], &a[4]) - a[5].Mul(&a[5], &twiddles[stage+0][1]) - fr.Butterfly(&a[1], &a[5]) - a[6].Mul(&a[6], &twiddles[stage+0][2]) - fr.Butterfly(&a[2], &a[6]) - a[7].Mul(&a[7], &twiddles[stage+0][3]) - fr.Butterfly(&a[3], &a[7]) -} - -// kerDIF8 is a kernel that process a FFT of size 8 -func kerDIF8(a []fr.Element, twiddles [][]fr.Element, stage int) { - - fr.Butterfly(&a[0], &a[4]) - fr.Butterfly(&a[1], &a[5]) - fr.Butterfly(&a[2], &a[6]) - fr.Butterfly(&a[3], &a[7]) - a[5].Mul(&a[5], &twiddles[stage+0][1]) - a[6].Mul(&a[6], &twiddles[stage+0][2]) - a[7].Mul(&a[7], &twiddles[stage+0][3]) - fr.Butterfly(&a[0], &a[2]) - fr.Butterfly(&a[1], &a[3]) - fr.Butterfly(&a[4], &a[6]) - fr.Butterfly(&a[5], &a[7]) - a[3].Mul(&a[3], &twiddles[stage+1][1]) - a[7].Mul(&a[7], &twiddles[stage+1][1]) - fr.Butterfly(&a[0], &a[1]) - fr.Butterfly(&a[2], &a[3]) - fr.Butterfly(&a[4], &a[5]) - fr.Butterfly(&a[6], &a[7]) -} diff --git a/tools/gnark/bls12_381/fr/fft/fft_test.go b/tools/gnark/bls12_381/fr/fft/fft_test.go deleted file mode 100644 index 0b0053f4..00000000 --- a/tools/gnark/bls12_381/fr/fft/fft_test.go +++ /dev/null @@ -1,326 +0,0 @@ -// Copyright 2020 ConsenSys Software Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
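The ordering conventions documented in the FFT code removed above (DIF produces bit-reversed output, DIT expects bit-reversed input, FFTInverse scales by 1/n) mean that an inverse DIF transform followed by a forward DIT transform is the identity, which is also what the deleted tests below assert. A minimal sketch, assuming the upstream fr and fr/fft packages stand in for the deleted copies:

package main

import (
	"fmt"

	"github.com/consensys/gnark-crypto/ecc/bls12-381/fr"
	"github.com/consensys/gnark-crypto/ecc/bls12-381/fr/fft"
)

func main() {
	domain := fft.NewDomain(8)

	// Random vector of 8 field elements (coefficients of a degree-7 polynomial).
	a := make([]fr.Element, 8)
	for i := range a {
		a[i].SetRandom()
	}
	backup := make([]fr.Element, len(a))
	copy(backup, a)

	// FFTInverse with DIF leaves its output in bit-reversed order, which is
	// exactly the input order FFT with DIT expects; FFTInverse also multiplies
	// by CardinalityInv, so the pair below is the identity.
	domain.FFTInverse(a, fft.DIF)
	domain.FFT(a, fft.DIT)

	ok := true
	for i := range a {
		ok = ok && a[i].Equal(&backup[i])
	}
	fmt.Println("DIT(DIF^-1) round trip is identity:", ok)
}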
- -// Code generated by consensys/gnark-crypto DO NOT EDIT - -package fft - -import ( - "math/big" - "strconv" - "testing" - - "github.com/consensys/gnark-crypto/ecc/bls12-381/fr" - - "github.com/leanovate/gopter" - "github.com/leanovate/gopter/gen" - "github.com/leanovate/gopter/prop" -) - -func TestFFT(t *testing.T) { - const maxSize = 1 << 10 - - nbCosets := 3 - domainWithPrecompute := NewDomain(maxSize) - - parameters := gopter.DefaultTestParameters() - parameters.MinSuccessfulTests = 5 - - properties := gopter.NewProperties(parameters) - - properties.Property("DIF FFT should be consistent with dual basis", prop.ForAll( - - // checks that a random evaluation of a dual function eval(gen**ithpower) is consistent with the FFT result - func(ithpower int) bool { - - pol := make([]fr.Element, maxSize) - backupPol := make([]fr.Element, maxSize) - - for i := 0; i < maxSize; i++ { - pol[i].SetRandom() - } - copy(backupPol, pol) - - domainWithPrecompute.FFT(pol, DIF, false) - BitReverse(pol) - - sample := domainWithPrecompute.Generator - sample.Exp(sample, big.NewInt(int64(ithpower))) - - eval := evaluatePolynomial(backupPol, sample) - - return eval.Equal(&pol[ithpower]) - - }, - gen.IntRange(0, maxSize-1), - )) - - properties.Property("DIF FFT on cosets should be consistent with dual basis", prop.ForAll( - - // checks that a random evaluation of a dual function eval(gen**ithpower) is consistent with the FFT result - func(ithpower int) bool { - - pol := make([]fr.Element, maxSize) - backupPol := make([]fr.Element, maxSize) - - for i := 0; i < maxSize; i++ { - pol[i].SetRandom() - } - copy(backupPol, pol) - - domainWithPrecompute.FFT(pol, DIF, true) - BitReverse(pol) - - sample := domainWithPrecompute.Generator - sample.Exp(sample, big.NewInt(int64(ithpower))). 
- Mul(&sample, &domainWithPrecompute.FrMultiplicativeGen) - - eval := evaluatePolynomial(backupPol, sample) - - return eval.Equal(&pol[ithpower]) - - }, - gen.IntRange(0, maxSize-1), - )) - - properties.Property("DIT FFT should be consistent with dual basis", prop.ForAll( - - // checks that a random evaluation of a dual function eval(gen**ithpower) is consistent with the FFT result - func(ithpower int) bool { - - pol := make([]fr.Element, maxSize) - backupPol := make([]fr.Element, maxSize) - - for i := 0; i < maxSize; i++ { - pol[i].SetRandom() - } - copy(backupPol, pol) - - BitReverse(pol) - domainWithPrecompute.FFT(pol, DIT, false) - - sample := domainWithPrecompute.Generator - sample.Exp(sample, big.NewInt(int64(ithpower))) - - eval := evaluatePolynomial(backupPol, sample) - - return eval.Equal(&pol[ithpower]) - - }, - gen.IntRange(0, maxSize-1), - )) - - properties.Property("bitReverse(DIF FFT(DIT FFT (bitReverse))))==id", prop.ForAll( - - func() bool { - - pol := make([]fr.Element, maxSize) - backupPol := make([]fr.Element, maxSize) - - for i := 0; i < maxSize; i++ { - pol[i].SetRandom() - } - copy(backupPol, pol) - - BitReverse(pol) - domainWithPrecompute.FFT(pol, DIT, false) - domainWithPrecompute.FFTInverse(pol, DIF, false) - BitReverse(pol) - - check := true - for i := 0; i < len(pol); i++ { - check = check && pol[i].Equal(&backupPol[i]) - } - return check - }, - )) - - properties.Property("bitReverse(DIF FFT(DIT FFT (bitReverse))))==id on cosets", prop.ForAll( - - func() bool { - - pol := make([]fr.Element, maxSize) - backupPol := make([]fr.Element, maxSize) - - for i := 0; i < maxSize; i++ { - pol[i].SetRandom() - } - copy(backupPol, pol) - - check := true - - for i := 1; i <= nbCosets; i++ { - - BitReverse(pol) - domainWithPrecompute.FFT(pol, DIT, true) - domainWithPrecompute.FFTInverse(pol, DIF, true) - BitReverse(pol) - - for i := 0; i < len(pol); i++ { - check = check && pol[i].Equal(&backupPol[i]) - } - } - - return check - }, - )) - - properties.Property("DIT FFT(DIF FFT)==id", prop.ForAll( - - func() bool { - - pol := make([]fr.Element, maxSize) - backupPol := make([]fr.Element, maxSize) - - for i := 0; i < maxSize; i++ { - pol[i].SetRandom() - } - copy(backupPol, pol) - - domainWithPrecompute.FFTInverse(pol, DIF, false) - domainWithPrecompute.FFT(pol, DIT, false) - - check := true - for i := 0; i < len(pol); i++ { - check = check && (pol[i] == backupPol[i]) - } - return check - }, - )) - - properties.Property("DIT FFT(DIF FFT)==id on cosets", prop.ForAll( - - func() bool { - - pol := make([]fr.Element, maxSize) - backupPol := make([]fr.Element, maxSize) - - for i := 0; i < maxSize; i++ { - pol[i].SetRandom() - } - copy(backupPol, pol) - - domainWithPrecompute.FFTInverse(pol, DIF, true) - domainWithPrecompute.FFT(pol, DIT, true) - - check := true - for i := 0; i < len(pol); i++ { - check = check && (pol[i] == backupPol[i]) - } - return check - }, - )) - - properties.TestingRun(t, gopter.ConsoleReporter(false)) - -} - -// -------------------------------------------------------------------- -// benches -func BenchmarkBitReverse(b *testing.B) { - - const maxSize = 1 << 20 - - pol := make([]fr.Element, maxSize) - pol[0].SetRandom() - for i := 1; i < maxSize; i++ { - pol[i] = pol[i-1] - } - - for i := 8; i < 20; i++ { - b.Run("bit reversing 2**"+strconv.Itoa(i)+"bits", func(b *testing.B) { - b.ResetTimer() - for j := 0; j < b.N; j++ { - BitReverse(pol[:1<x², on a - // power of 2 subgroup of Fr^{*}. 
- RADIX_2_FRI IOPP = iota -) - -// round contains the data corresponding to a single round -// of fri. -// It consists of a list of Interactions between the prover and the verifier, -// where each interaction contains a challenge provided by the verifier, as -// well as MerkleProofs for the queries of the verifier. The Merkle proofs -// correspond to the openings of the i-th folded polynomial at 2 points that -// belong to the same fiber of x -> x². -type Round struct { - - // stores the Interactions between the prover and the verifier. - // Each interaction results in a set or merkle proofs, corresponding - // to the queries of the verifier. - Interactions [][2]MerkleProof - - // evaluation stores the evaluation of the fully folded polynomial. - // The fully folded polynomial is constant, and is evaluated on a - // a set of size \rho. Since the polynomial is supposed to be constant, - // only one evaluation, corresponding to the polynomial, is given. Since - // the prover cannot know in advance which entry the verifier will query, - // providing a single evaluation - Evaluation fr.Element -} - -// ProofOfProximity proof of proximity, attesting that -// a function is d-close to a low degree polynomial. -// -// It is composed of a series of Interactions, emulated with Fiat Shamir, -type ProofOfProximity struct { - - // ID unique ID attached to the proof of proximity. It's needed for - // protocols using Fiat Shamir for instance, where challenges are derived - // from the proof of proximity. - ID []byte - - // round contains the data corresponding to a single round - // of fri. There are nbRounds rounds of Interactions. - Rounds []Round -} - -// Iopp interface that an iopp should implement -type Iopp interface { - - // BuildProofOfProximity creates a proof of proximity that p is d-close to a polynomial - // of degree len(p). The proof is built non interactively using Fiat Shamir. - BuildProofOfProximity(p []fr.Element) (ProofOfProximity, error) - - // VerifyProofOfProximity verifies the proof of proximity. It returns an error if the - // verification fails. - VerifyProofOfProximity(proof ProofOfProximity) error - - // Opens a polynomial at gⁱ where i = position. - Open(p []fr.Element, position uint64) (OpeningProof, error) - - // Verifies the opening of a polynomial at gⁱ where i = position. - VerifyOpening(position uint64, openingProof OpeningProof, pp ProofOfProximity) error -} - -// GetRho returns the factor ρ = size_code_word/size_polynomial -func GetRho() int { - return rho -} - -func init() { - twoInv.SetUint64(2).Inverse(&twoInv) -} - -// New creates a new IOPP capable to handle degree(size) polynomials. -func (iopp IOPP) New(size uint64, h hash.Hash) Iopp { - switch iopp { - case RADIX_2_FRI: - return newRadixTwoFri(size, h) - default: - panic("iopp name is not recognized") - } -} - -// radixTwoFri empty structs implementing compressionFunction for -// the squaring function. -type radixTwoFri struct { - - // hash function that is used for Fiat Shamir and for committing to - // the oracles. - h hash.Hash - - // nbSteps number of Interactions between the prover and the verifier - nbSteps int - - // domain used to build the Reed Solomon code from the given polynomial. - // The size of the domain is ρ*size_polynomial. 
- domain *fft.Domain -} - -func newRadixTwoFri(size uint64, h hash.Hash) radixTwoFri { - - var res radixTwoFri - - // computing the number of steps - n := ecc.NextPowerOfTwo(size) - nbSteps := bits.TrailingZeros(uint(n)) - res.nbSteps = nbSteps - - // extending the domain - n = n * rho - - // building the domains - res.domain = fft.NewDomain(n) - - // hash function - res.h = h - - return res -} - -// convertCanonicalSorted convert the index i, an entry in a -// sorted polynomial, to the corresponding entry in canonical -// representation. n is the size of the polynomial. -func convertCanonicalSorted(i, n int) int { - - if i < n/2 { - return 2 * i - } else { - l := n - (i + 1) - l = 2 * l - return n - l - 1 - } - -} - -// deriveQueriesPositions derives the indices of the oracle -// function that the verifier has to pick, in sorted form. -// * pos is the initial position, i.e. the logarithm of the first challenge -// * size is the size of the initial polynomial -// * The result is a slice of []int, where each entry is a tuple (iₖ), such that -// the verifier needs to evaluate ∑ₖ oracle(iₖ)xᵏ to build -// the folded function. -func (s radixTwoFri) deriveQueriesPositions(pos int, size int) []int { - - _s := size / 2 - res := make([]int, s.nbSteps) - res[0] = pos - for i := 1; i < s.nbSteps; i++ { - t := (res[i-1] - (res[i-1] % 2)) / 2 - res[i] = convertCanonicalSorted(t, _s) - _s = _s / 2 - } - - return res -} - -// sort orders the evaluation of a polynomial on a domain -// such that contiguous entries are in the same fiber: -// {q(g⁰), q(g^{n/2}), q(g¹), q(g^{1+n/2}),...,q(g^{n/2-1}), q(gⁿ⁻¹)} -func sort(evaluations []fr.Element) []fr.Element { - q := make([]fr.Element, len(evaluations)) - n := len(evaluations) / 2 - for i := 0; i < n; i++ { - q[2*i].Set(&evaluations[i]) - q[2*i+1].Set(&evaluations[i+n]) - } - return q -} - -// Opens a polynomial at gⁱ where i = position. -func (s radixTwoFri) Open(p []fr.Element, position uint64) (OpeningProof, error) { - - // check that position is in the correct range - if position >= s.domain.Cardinality { - return OpeningProof{}, ErrRangePosition - } - - // put q in evaluation form - q := make([]fr.Element, s.domain.Cardinality) - copy(q, p) - s.domain.FFT(q, fft.DIF) - fft.BitReverse(q) - - // sort q to have fibers in contiguous entries. The goal is to have one - // Merkle path for both openings of entries which are in the same fiber. - q = sort(q) - - // build the Merkle proof, we the position is converted to fit the sorted polynomial - pos := convertCanonicalSorted(int(position), len(q)) - - tree := merkletree.New(s.h) - err := tree.SetIndex(uint64(pos)) - if err != nil { - return OpeningProof{}, err - } - for i := 0; i < len(q); i++ { - tree.Push(q[i].Marshal()) - } - var res OpeningProof - res.merkleRoot, res.ProofSet, res.index, res.numLeaves = tree.Prove() - - // set the claimed value, which is the first entry of the Merkle proof - res.ClaimedValue.SetBytes(res.ProofSet[0]) - - return res, nil -} - -// Verifies the opening of a polynomial. -// * position the point at which the proof is opened (the point is gⁱ where i = position) -// * openingProof Merkle path proof -// * pp proof of proximity, needed because before opening Merkle path proof one should be sure that the -// committed values come from a polynomial. During the verification of the Merkle path proof, the root -// hash of the Merkle path is compared to the root hash of the first interaction of the proof of proximity, -// those should be equal, if not an error is raised. 
-func (s radixTwoFri) VerifyOpening(position uint64, openingProof OpeningProof, pp ProofOfProximity) error { - - // To query the Merkle path, we look at the first series of Interactions, and check whether it's the point - // at 'position' or its neighbor that contains the full Merkle path. - var fullMerkleProof int - if len(pp.Rounds[0].Interactions[0][0].ProofSet) > len(pp.Rounds[0].Interactions[0][1].ProofSet) { - fullMerkleProof = 0 - } else { - fullMerkleProof = 1 - } - - // check that the merkle roots coincide - if !bytes.Equal(openingProof.merkleRoot, pp.Rounds[0].Interactions[0][fullMerkleProof].MerkleRoot) { - return ErrMerkleRoot - } - - // convert position to the sorted version - sizePoly := s.domain.Cardinality - pos := convertCanonicalSorted(int(position), int(sizePoly)) - - // check the Merkle proof - res := merkletree.VerifyProof(s.h, openingProof.merkleRoot, openingProof.ProofSet, uint64(pos), openingProof.numLeaves) - if !res { - return ErrMerklePath - } - return nil - -} - -// foldPolynomialLagrangeBasis folds a polynomial p, expressed in Lagrange basis. -// -// Fᵣ[X]/(Xⁿ-1) is a free module of rank 2 on Fᵣ[Y]/(Y^{n/2}-1). If -// p∈ Fᵣ[X]/(Xⁿ-1), expressed in Lagrange basis, the function finds the coordinates -// p₁, p₂ of p in Fᵣ[Y]/(Y^{n/2}-1), expressed in Lagrange basis. Finally, it computes -// p₁ + x*p₂ and returns it. -// -// * p is the polynomial to fold, in Lagrange basis, sorted like this: p = [p(1),p(-1),p(g),p(-g),p(g²),p(-g²),...] -// * g is a generator of the subgroup of Fᵣ^{*} of size len(p) -// * x is the folding challenge x, used to return p₁+x*p₂ -func foldPolynomialLagrangeBasis(pSorted []fr.Element, gInv, x fr.Element) []fr.Element { - - // we have the following system - // p₁(g²ⁱ)+gⁱp₂(g²ⁱ) = p(gⁱ) - // p₁(g²ⁱ)-gⁱp₂(g²ⁱ) = p(-gⁱ) - // we solve the system for p₁(g²ⁱ),p₂(g²ⁱ) - s := len(pSorted) - res := make([]fr.Element, s/2) - - var p1, p2, acc fr.Element - acc.SetOne() - - for i := 0; i < s/2; i++ { - - p1.Add(&pSorted[2*i], &pSorted[2*i+1]) - p2.Sub(&pSorted[2*i], &pSorted[2*i+1]).Mul(&p2, &acc) - res[i].Mul(&p2, &x).Add(&res[i], &p1).Mul(&res[i], &twoInv) - - acc.Mul(&acc, &gInv) - - } - - return res -} - -// paddNaming takes s = 0xA1.... and turns -// it into s' = 0xA1.. || 0..0 of size frSize bytes. -// Using this, when writing the domain separator in FiatShamir, it takes -// the same size as a snark variable (=number of byte in the block of a snark compliant -// hash function like mimc), so it is compliant with snark circuit. -func paddNaming(s string, size int) string { - a := make([]byte, size) - b := []byte(s) - copy(a, b) - return string(a) -} - -// buildProofOfProximitySingleRound generates a proof that a function, given as an oracle from -// the verifier point of view, is in fact δ-close to a polynomial. -// * salt is a variable for multi rounds, it allows to generate different challenges using Fiat Shamir -// * p is in evaluation form -func (s radixTwoFri) buildProofOfProximitySingleRound(salt fr.Element, p []fr.Element) (Round, error) { - - // the proof will contain nbSteps Interactions - var res Round - res.Interactions = make([][2]MerkleProof, s.nbSteps) - - // Fiat Shamir transcript to derive the challenges. The xᵢ are used to fold the - // polynomials. - // During the i-th round, the prover has a polynomial P of degree n. The verifier sends - // xᵢ∈ Fᵣ to the prover. The prover expresses F in Fᵣ[X,Y]/ as - // P₀(Y)+X P₁(Y) where P₀, P₁ are of degree n/2, and he then folds the polynomial - // by replacing x by xᵢ. 
- xis := make([]string, s.nbSteps+1) - for i := 0; i < s.nbSteps; i++ { - xis[i] = paddNaming(fmt.Sprintf("x%d", i), fr.Bytes) - } - xis[s.nbSteps] = paddNaming("s0", fr.Bytes) - fs := fiatshamir.NewTranscript(s.h, xis...) - - // the salt is binded to the first challenge, to ensure the challenges - // are different at each round. - err := fs.Bind(xis[0], salt.Marshal()) - if err != nil { - return Round{}, err - } - - // step 1 : fold the polynomial using the xi - - // evalsAtRound stores the list of the nbSteps polynomial evaluations, each evaluation - // corresponds to the evaluation o the folded polynomial at round i. - evalsAtRound := make([][]fr.Element, s.nbSteps) - - // evaluate p and sort the result - _p := make([]fr.Element, s.domain.Cardinality) - copy(_p, p) - - // gInv inverse of the generator of the cyclic group of size the size of the polynomial. - // The size of the cyclic group is ρ*s.domainSize, and not s.domainSize. - var gInv fr.Element - gInv.Set(&s.domain.GeneratorInv) - - for i := 0; i < s.nbSteps; i++ { - - evalsAtRound[i] = sort(_p) - - // compute the root hash, needed to derive xi - t := merkletree.New(s.h) - for k := 0; k < len(_p); k++ { - t.Push(evalsAtRound[i][k].Marshal()) - } - rh := t.Root() - err := fs.Bind(xis[i], rh) - if err != nil { - return res, err - } - - // derive the challenge - bxi, err := fs.ComputeChallenge(xis[i]) - if err != nil { - return res, err - } - var xi fr.Element - xi.SetBytes(bxi) - - // fold _p, reusing its memory - _p = foldPolynomialLagrangeBasis(evalsAtRound[i], gInv, xi) - - // g <- g² - gInv.Square(&gInv) - - } - - // last round, provide the evaluation. The fully folded polynomial is of size rho. It should - // correspond to the evaluation of a polynomial of degree 1 on ρ points, so those points - // are supposed to be on a line. - res.Evaluation.Set(&_p[0]) - - // step 2: provide the Merkle proofs of the queries - - // derive the verifier queries - err = fs.Bind(xis[s.nbSteps], res.Evaluation.Marshal()) - if err != nil { - return res, err - } - binSeed, err := fs.ComputeChallenge(xis[s.nbSteps]) - if err != nil { - return res, err - } - var bPos, bCardinality big.Int - bPos.SetBytes(binSeed) - bCardinality.SetUint64(s.domain.Cardinality) - bPos.Mod(&bPos, &bCardinality) - si := s.deriveQueriesPositions(int(bPos.Uint64()), int(s.domain.Cardinality)) - - for i := 0; i < s.nbSteps; i++ { - - // build proofs of queries at s[i] - t := merkletree.New(s.h) - err := t.SetIndex(uint64(si[i])) - if err != nil { - return res, err - } - for k := 0; k < len(evalsAtRound[i]); k++ { - t.Push(evalsAtRound[i][k].Marshal()) - } - mr, ProofSet, _, numLeaves := t.Prove() - - // c denotes the entry that contains the full Merkle proof. The entry 1-c will - // only contain 2 elements, which are the neighbor point, and the hash of the - // first point. The remaining of the Merkle path is common to both the original - // point and its neighbor. - c := si[i] % 2 - res.Interactions[i][c] = MerkleProof{mr, ProofSet, numLeaves} - res.Interactions[i][1-c] = MerkleProof{ - mr, - make([][]byte, 2), - numLeaves, - } - res.Interactions[i][1-c].ProofSet[0] = evalsAtRound[i][si[i]+1-2*c].Marshal() - s.h.Reset() - _, err = s.h.Write(res.Interactions[i][c].ProofSet[0]) - if err != nil { - return res, err - } - res.Interactions[i][1-c].ProofSet[1] = s.h.Sum(nil) - - } - - return res, nil - -} - -// BuildProofOfProximity generates a proof that a function, given as an oracle from -// the verifier point of view, is in fact δ-close to a polynomial. 
-func (s radixTwoFri) BuildProofOfProximity(p []fr.Element) (ProofOfProximity, error) { - - // the proof will contain nbSteps Interactions - var proof ProofOfProximity - proof.Rounds = make([]Round, nbRounds) - - // evaluate p - // evaluate p and sort the result - _p := make([]fr.Element, s.domain.Cardinality) - copy(_p, p) - s.domain.FFT(_p, fft.DIF) - fft.BitReverse(_p) - - var err error - var salt, one fr.Element - one.SetOne() - for i := 0; i < nbRounds; i++ { - proof.Rounds[i], err = s.buildProofOfProximitySingleRound(salt, _p) - if err != nil { - return proof, err - } - salt.Add(&salt, &one) - } - - return proof, nil -} - -// verifyProofOfProximitySingleRound verifies the proof of proximity. It returns an error if the -// verification fails. -func (s radixTwoFri) verifyProofOfProximitySingleRound(salt fr.Element, proof Round) error { - - // Fiat Shamir transcript to derive the challenges - xis := make([]string, s.nbSteps+1) - for i := 0; i < s.nbSteps; i++ { - xis[i] = paddNaming(fmt.Sprintf("x%d", i), fr.Bytes) - } - xis[s.nbSteps] = paddNaming("s0", fr.Bytes) - fs := fiatshamir.NewTranscript(s.h, xis...) - - xi := make([]fr.Element, s.nbSteps) - - // the salt is binded to the first challenge, to ensure the challenges - // are different at each round. - err := fs.Bind(xis[0], salt.Marshal()) - if err != nil { - return err - } - - for i := 0; i < s.nbSteps; i++ { - err := fs.Bind(xis[i], proof.Interactions[i][0].MerkleRoot) - if err != nil { - return err - } - bxi, err := fs.ComputeChallenge(xis[i]) - if err != nil { - return err - } - xi[i].SetBytes(bxi) - } - - // derive the verifier queries - // for i := 0; i < len(proof.evaluation); i++ { - // err := fs.Bind(xis[s.nbSteps], proof.evaluation[i].Marshal()) - // if err != nil { - // return err - // } - // } - err = fs.Bind(xis[s.nbSteps], proof.Evaluation.Marshal()) - if err != nil { - return err - } - binSeed, err := fs.ComputeChallenge(xis[s.nbSteps]) - if err != nil { - return err - } - var bPos, bCardinality big.Int - bPos.SetBytes(binSeed) - bCardinality.SetUint64(s.domain.Cardinality) - bPos.Mod(&bPos, &bCardinality) - si := s.deriveQueriesPositions(int(bPos.Uint64()), int(s.domain.Cardinality)) - - // for each round check the Merkle proof and the correctness of the folding - - // current size of the polynomial - var accGInv fr.Element - accGInv.Set(&s.domain.GeneratorInv) - for i := 0; i < s.nbSteps; i++ { - - // correctness of Merkle proof - // c is the entry containing the full Merkle proof. - c := si[i] % 2 - res := merkletree.VerifyProof( - s.h, - proof.Interactions[i][c].MerkleRoot, - proof.Interactions[i][c].ProofSet, - uint64(si[i]), - proof.Interactions[i][c].numLeaves, - ) - if !res { - return ErrMerklePath - } - - // we verify the Merkle proof for the neighbor query, to do that we have - // to pick the full Merkle proof of the first entry, stripped off of the leaf and - // the first node. We replace the leaf and the first node by the leaf and the first - // node of the partial Merkle proof, since the leaf and the first node of both proofs - // are the only entries that differ. 
- ProofSet := make([][]byte, len(proof.Interactions[i][c].ProofSet)) - copy(ProofSet[2:], proof.Interactions[i][c].ProofSet[2:]) - ProofSet[0] = proof.Interactions[i][1-c].ProofSet[0] - ProofSet[1] = proof.Interactions[i][1-c].ProofSet[1] - res = merkletree.VerifyProof( - s.h, - proof.Interactions[i][1-c].MerkleRoot, - ProofSet, - uint64(si[i]+1-2*c), - proof.Interactions[i][1-c].numLeaves, - ) - if !res { - return ErrMerklePath - } - - // correctness of the folding - if i < s.nbSteps-1 { - - var fe, fo, l, r, fn fr.Element - - // l = P(gⁱ), r = P(g^{i+n/2}) - l.SetBytes(proof.Interactions[i][0].ProofSet[0]) - r.SetBytes(proof.Interactions[i][1].ProofSet[0]) - - // (g^{si[i]}, g^{si[i]+1}) is the fiber of g^{2*si[i]}. The system to solve - // (for P₀(g^{2si[i]}), P₀(g^{2si[i]}) ) is: - // P(g^{si[i]}) = P₀(g^{2si[i]}) + g^{si[i]/2}*P₀(g^{2si[i]}) - // P(g^{si[i]+1}) = P₀(g^{2si[i]}) - g^{si[i]/2}*P₀(g^{2si[i]}) - bm := big.NewInt(int64(si[i] / 2)) - var ginv fr.Element - ginv.Exp(accGInv, bm) - fe.Add(&l, &r) // P₁(g²ⁱ) (to be multiplied by 2⁻¹) - fo.Sub(&l, &r).Mul(&fo, &ginv) // P₀(g²ⁱ) (to be multiplied by 2⁻¹) - fo.Mul(&fo, &xi[i]).Add(&fo, &fe).Mul(&fo, &twoInv) // P₀(g²ⁱ) + xᵢ * P₁(g²ⁱ) - - fn.SetBytes(proof.Interactions[i+1][si[i+1]%2].ProofSet[0]) - - if !fo.Equal(&fn) { - return ErrProximityTestFolding - } - - // next inverse generator - accGInv.Square(&accGInv) - } - - } - - // last transition - var fe, fo, l, r fr.Element - - l.SetBytes(proof.Interactions[s.nbSteps-1][0].ProofSet[0]) - r.SetBytes(proof.Interactions[s.nbSteps-1][1].ProofSet[0]) - - _si := si[s.nbSteps-1] / 2 - - accGInv.Exp(accGInv, big.NewInt(int64(_si))) - - fe.Add(&l, &r) // P₁(g²ⁱ) (to be multiplied by 2⁻¹) - fo.Sub(&l, &r).Mul(&fo, &accGInv) // P₀(g²ⁱ) (to be multiplied by 2⁻¹) - fo.Mul(&fo, &xi[s.nbSteps-1]).Add(&fo, &fe).Mul(&fo, &twoInv) // P₀(g²ⁱ) + xᵢ * P₁(g²ⁱ) - - // Last step: the final evaluation should be the evaluation of a degree 0 polynomial, - // so it must be constant. - if !fo.Equal(&proof.Evaluation) { - return ErrProximityTestFolding - } - - return nil -} - -// VerifyProofOfProximity verifies the proof, by checking each interaction one -// by one. -func (s radixTwoFri) VerifyProofOfProximity(proof ProofOfProximity) error { - - var salt, one fr.Element - one.SetOne() - for i := 0; i < nbRounds; i++ { - err := s.verifyProofOfProximitySingleRound(salt, proof.Rounds[i]) - if err != nil { - return err - } - salt.Add(&salt, &one) - } - return nil - -} diff --git a/tools/gnark/bls12_381/fr/fri/fri_test.go b/tools/gnark/bls12_381/fr/fri/fri_test.go deleted file mode 100644 index 084508db..00000000 --- a/tools/gnark/bls12_381/fr/fri/fri_test.go +++ /dev/null @@ -1,240 +0,0 @@ -// Copyright 2020 ConsenSys Software Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
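The Iopp interface removed above packages the whole protocol: RADIX_2_FRI.New builds a radix-2 instance over a domain of size ρ·size, BuildProofOfProximity runs the nbSteps folding rounds under Fiat-Shamir, and VerifyProofOfProximity, Open and VerifyOpening are the checks exercised by the deleted tests below. A minimal usage sketch, assuming the upstream fr and fr/fri packages in place of the deleted copies:

package main

import (
	"crypto/sha256"
	"fmt"

	"github.com/consensys/gnark-crypto/ecc/bls12-381/fr"
	"github.com/consensys/gnark-crypto/ecc/bls12-381/fr/fri"
)

func main() {
	const size = 64

	// Random polynomial of degree < size, in coefficient form.
	p := make([]fr.Element, size)
	for i := range p {
		p[i].SetRandom()
	}

	// Radix-2 FRI over SHA-256: commits to the Reed-Solomon encoding of p on a
	// domain of size rho*size and folds it log2(size) times with Fiat-Shamir
	// challenges.
	iop := fri.RADIX_2_FRI.New(size, sha256.New())

	proof, err := iop.BuildProofOfProximity(p)
	if err != nil {
		panic(err)
	}
	if err := iop.VerifyProofOfProximity(proof); err != nil {
		panic(err)
	}

	// Open p at g^3 and check the opening against the first-round Merkle root
	// of the proximity proof.
	opening, err := iop.Open(p, 3)
	if err != nil {
		panic(err)
	}
	if err := iop.VerifyOpening(3, opening, proof); err != nil {
		panic(err)
	}
	fmt.Println("proximity proof and opening verified")
}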
- -// Code generated by consensys/gnark-crypto DO NOT EDIT - -package fri - -import ( - "crypto/sha256" - "fmt" - "math/big" - "testing" - - "github.com/consensys/gnark-crypto/ecc/bls12-381/fr" - "github.com/leanovate/gopter" - "github.com/leanovate/gopter/gen" - "github.com/leanovate/gopter/prop" -) - -// logFiber returns u, v such that {g^u, g^v} = f⁻¹((g²)^{_p}) -func logFiber(_p, _n int) (_u, _v big.Int) { - if _p%2 == 0 { - _u.SetInt64(int64(_p / 2)) - _v.SetInt64(int64(_p/2 + _n/2)) - } else { - l := (_n - 1 - _p) / 2 - _u.SetInt64(int64(_n - 1 - l)) - _v.SetInt64(int64(_n - 1 - l - _n/2)) - } - return -} - -func randomPolynomial(size uint64, seed int32) []fr.Element { - p := make([]fr.Element, size) - p[0].SetUint64(uint64(seed)) - for i := 1; i < len(p); i++ { - p[i].Square(&p[i-1]) - } - return p -} - -// convertOrderCanonical convert the index i, an entry in a -// sorted polynomial, to the corresponding entry in canonical -// representation. n is the size of the polynomial. -func convertSortedCanonical(i, n int) int { - if i%2 == 0 { - return i / 2 - } else { - l := (n - 1 - i) / 2 - return n - 1 - l - } -} - -func TestFRI(t *testing.T) { - - parameters := gopter.DefaultTestParameters() - parameters.MinSuccessfulTests = 10 - - properties := gopter.NewProperties(parameters) - - size := 4096 - - properties.Property("verifying wrong opening should fail", prop.ForAll( - - func(m int32) bool { - - _s := RADIX_2_FRI.New(uint64(size), sha256.New()) - s := _s.(radixTwoFri) - - p := randomPolynomial(uint64(size), m) - - pos := int64(m % 4096) - pp, _ := s.BuildProofOfProximity(p) - - openingProof, err := s.Open(p, uint64(pos)) - if err != nil { - t.Fatal(err) - } - - // check the Merkle path - tamperedPosition := pos + 1 - err = s.VerifyOpening(uint64(tamperedPosition), openingProof, pp) - - return err != nil - - }, - gen.Int32Range(0, int32(rho*size)), - )) - - properties.Property("verifying correct opening should succeed", prop.ForAll( - - func(m int32) bool { - - _s := RADIX_2_FRI.New(uint64(size), sha256.New()) - s := _s.(radixTwoFri) - - p := randomPolynomial(uint64(size), m) - - pos := uint64(m % int32(size)) - pp, _ := s.BuildProofOfProximity(p) - - openingProof, err := s.Open(p, uint64(pos)) - if err != nil { - t.Fatal(err) - } - - // check the Merkle path - err = s.VerifyOpening(uint64(pos), openingProof, pp) - - return err == nil - - }, - gen.Int32Range(0, int32(rho*size)), - )) - - properties.Property("The claimed value of a polynomial should match P(x)", prop.ForAll( - func(m int32) bool { - - _s := RADIX_2_FRI.New(uint64(size), sha256.New()) - s := _s.(radixTwoFri) - - p := randomPolynomial(uint64(size), m) - - // check the opening value - var g fr.Element - pos := int64(m % 4096) - g.Set(&s.domain.Generator) - g.Exp(g, big.NewInt(pos)) - - var val fr.Element - for i := len(p) - 1; i >= 0; i-- { - val.Mul(&val, &g) - val.Add(&p[i], &val) - } - - openingProof, err := s.Open(p, uint64(pos)) - if err != nil { - t.Fatal(err) - } - - return openingProof.ClaimedValue.Equal(&val) - - }, - gen.Int32Range(0, int32(rho*size)), - )) - - properties.Property("Derive queries position: points should belong the correct fiber", prop.ForAll( - - func(m int32) bool { - - _s := RADIX_2_FRI.New(uint64(size), sha256.New()) - s := _s.(radixTwoFri) - - var g fr.Element - - _m := int(m) % size - pos := s.deriveQueriesPositions(_m, int(s.domain.Cardinality)) - g.Set(&s.domain.Generator) - n := int(s.domain.Cardinality) - - for i := 0; i < len(pos)-1; i++ { - - u, v := logFiber(pos[i], n) - - var g1, 
g2, g3 fr.Element - g1.Exp(g, &u).Square(&g1) - g2.Exp(g, &v).Square(&g2) - nextPos := convertSortedCanonical(pos[i+1], n/2) - g3.Square(&g).Exp(g3, big.NewInt(int64(nextPos))) - - if !g1.Equal(&g2) || !g1.Equal(&g3) { - return false - } - g.Square(&g) - n = n >> 1 - } - return true - }, - gen.Int32Range(0, int32(rho*size)), - )) - - properties.Property("verifying a correctly formed proof should succeed", prop.ForAll( - - func(s int32) bool { - - p := randomPolynomial(uint64(size), s) - - iop := RADIX_2_FRI.New(uint64(size), sha256.New()) - proof, err := iop.BuildProofOfProximity(p) - if err != nil { - t.Fatal(err) - } - - err = iop.VerifyProofOfProximity(proof) - return err == nil - }, - gen.Int32Range(0, int32(rho*size)), - )) - - properties.TestingRun(t, gopter.ConsoleReporter(false)) - -} - -// Benchmarks - -func BenchmarkProximityVerification(b *testing.B) { - - baseSize := 16 - - for i := 0; i < 10; i++ { - - size := baseSize << i - p := make([]fr.Element, size) - for k := 0; k < size; k++ { - p[k].SetRandom() - } - - iop := RADIX_2_FRI.New(uint64(size), sha256.New()) - proof, _ := iop.BuildProofOfProximity(p) - - b.Run(fmt.Sprintf("Polynomial size %d", size), func(b *testing.B) { - b.ResetTimer() - for l := 0; l < b.N; l++ { - iop.VerifyProofOfProximity(proof) - } - }) - - } -} diff --git a/tools/gnark/bls12_381/fr/gkr/gkr.go b/tools/gnark/bls12_381/fr/gkr/gkr.go deleted file mode 100644 index 185b4455..00000000 --- a/tools/gnark/bls12_381/fr/gkr/gkr.go +++ /dev/null @@ -1,774 +0,0 @@ -// Copyright 2020 ConsenSys Software Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by consensys/gnark-crypto DO NOT EDIT - -package gkr - -import ( - "fmt" - "github.com/consensys/gnark-crypto/ecc/bls12-381/fr" - "github.com/consensys/gnark-crypto/ecc/bls12-381/fr/polynomial" - "github.com/consensys/gnark-crypto/ecc/bls12-381/fr/sumcheck" - fiatshamir "github.com/consensys/gnark-crypto/fiat-shamir" - "strconv" - "sync" -) - -// The goal is to prove/verify evaluations of many instances of the same circuit - -// Gate must be a low-degree polynomial -type Gate interface { - Evaluate(...fr.Element) fr.Element - Degree() int -} - -type Wire struct { - Gate Gate - Inputs []*Wire // if there are no Inputs, the wire is assumed an input wire - nbUniqueOutputs int // number of other wires using it as input, not counting duplicates (i.e. 
providing two inputs to the same gate counts as one) -} - -type Circuit []Wire - -func (w Wire) IsInput() bool { - return len(w.Inputs) == 0 -} - -func (w Wire) IsOutput() bool { - return w.nbUniqueOutputs == 0 -} - -func (w Wire) NbClaims() int { - if w.IsOutput() { - return 1 - } - return w.nbUniqueOutputs -} - -func (w Wire) noProof() bool { - return w.IsInput() && w.NbClaims() == 1 -} - -// WireAssignment is assignment of values to the same wire across many instances of the circuit -type WireAssignment map[*Wire]polynomial.MultiLin - -type Proof []sumcheck.Proof // for each layer, for each wire, a sumcheck (for each variable, a polynomial) - -type eqTimesGateEvalSumcheckLazyClaims struct { - wire *Wire - evaluationPoints [][]fr.Element - claimedEvaluations []fr.Element - manager *claimsManager // WARNING: Circular references -} - -func (e *eqTimesGateEvalSumcheckLazyClaims) ClaimsNum() int { - return len(e.evaluationPoints) -} - -func (e *eqTimesGateEvalSumcheckLazyClaims) VarsNum() int { - return len(e.evaluationPoints[0]) -} - -func (e *eqTimesGateEvalSumcheckLazyClaims) CombinedSum(a fr.Element) fr.Element { - evalsAsPoly := polynomial.Polynomial(e.claimedEvaluations) - return evalsAsPoly.Eval(&a) -} - -func (e *eqTimesGateEvalSumcheckLazyClaims) Degree(int) int { - return 1 + e.wire.Gate.Degree() -} - -func (e *eqTimesGateEvalSumcheckLazyClaims) VerifyFinalEval(r []fr.Element, combinationCoeff fr.Element, purportedValue fr.Element, proof interface{}) error { - inputEvaluationsNoRedundancy := proof.([]fr.Element) - - // the eq terms - numClaims := len(e.evaluationPoints) - evaluation := polynomial.EvalEq(e.evaluationPoints[numClaims-1], r) - for i := numClaims - 2; i >= 0; i-- { - evaluation.Mul(&evaluation, &combinationCoeff) - eq := polynomial.EvalEq(e.evaluationPoints[i], r) - evaluation.Add(&evaluation, &eq) - } - - // the g(...) term - var gateEvaluation fr.Element - if e.wire.IsInput() { - gateEvaluation = e.manager.assignment[e.wire].Evaluate(r, e.manager.memPool) - } else { - inputEvaluations := make([]fr.Element, len(e.wire.Inputs)) - indexesInProof := make(map[*Wire]int, len(inputEvaluationsNoRedundancy)) - - proofI := 0 - for inI, in := range e.wire.Inputs { - indexInProof, found := indexesInProof[in] - if !found { - indexInProof = proofI - indexesInProof[in] = indexInProof - - // defer verification, store new claim - e.manager.add(in, r, inputEvaluationsNoRedundancy[indexInProof]) - proofI++ - } - inputEvaluations[inI] = inputEvaluationsNoRedundancy[indexInProof] - } - if proofI != len(inputEvaluationsNoRedundancy) { - return fmt.Errorf("%d input wire evaluations given, %d expected", len(inputEvaluationsNoRedundancy), proofI) - } - gateEvaluation = e.wire.Gate.Evaluate(inputEvaluations...) 
- } - - evaluation.Mul(&evaluation, &gateEvaluation) - - if evaluation.Equal(&purportedValue) { - return nil - } - return fmt.Errorf("incompatible evaluations") -} - -type eqTimesGateEvalSumcheckClaims struct { - wire *Wire - evaluationPoints [][]fr.Element // x in the paper - claimedEvaluations []fr.Element // y in the paper - manager *claimsManager - - inputPreprocessors []polynomial.MultiLin // P_u in the paper, so that we don't need to pass along all the circuit's evaluations - - eq polynomial.MultiLin // ∑_i τ_i eq(x_i, -) -} - -func (c *eqTimesGateEvalSumcheckClaims) Combine(combinationCoeff fr.Element) polynomial.Polynomial { - varsNum := c.VarsNum() - eqLength := 1 << varsNum - claimsNum := c.ClaimsNum() - // initialize the eq tables - c.eq = c.manager.memPool.Make(eqLength) - - c.eq[0].SetOne() - c.eq.Eq(c.evaluationPoints[0]) - - newEq := polynomial.MultiLin(c.manager.memPool.Make(eqLength)) - aI := combinationCoeff - - for k := 1; k < claimsNum; k++ { //TODO: parallelizable? - // define eq_k = aᵏ eq(x_k1, ..., x_kn, *, ..., *) where x_ki are the evaluation points - newEq[0].Set(&aI) - newEq.Eq(c.evaluationPoints[k]) - - eqAsPoly := polynomial.Polynomial(c.eq) //just semantics - eqAsPoly.Add(eqAsPoly, polynomial.Polynomial(newEq)) - - if k+1 < claimsNum { - aI.Mul(&aI, &combinationCoeff) - } - } - - c.manager.memPool.Dump(newEq) - - // from this point on the claim is a rather simple one: g = E(h) × R_v (P_u0(h), ...) where E and the P_u are multilinear and R_v is of low-degree - - return c.computeGJ() -} - -// computeValAndStep returns val : i ↦ m(1, i...) and step : i ↦ m(1, i...) - m(0, i...) -func computeValAndStep(m polynomial.MultiLin, p *polynomial.Pool) (val polynomial.MultiLin, step polynomial.MultiLin) { - val = p.Clone(m[len(m)/2:]) - step = p.Clone(m[:len(m)/2]) - - valAsPoly, stepAsPoly := polynomial.Polynomial(val), polynomial.Polynomial(step) - - stepAsPoly.Sub(valAsPoly, stepAsPoly) - return -} - -// computeGJ: gⱼ = ∑_{0≤i<2ⁿ⁻ʲ} g(r₁, r₂, ..., rⱼ₋₁, Xⱼ, i...) = ∑_{0≤i<2ⁿ⁻ʲ} E(r₁, ..., X_j, i...) R_v( P_u0(r₁, ..., X_j, i...), ... ) where E = ∑ eq_k -// the polynomial is represented by the evaluations g_j(1), g_j(2), ..., g_j(deg(g_j)). -// The value g_j(0) is inferred from the equation g_j(0) + g_j(1) = g_{j-1}(r_{j-1}). By convention, g_0 is a constant polynomial equal to the claimed sum. -func (c *eqTimesGateEvalSumcheckClaims) computeGJ() (gJ polynomial.Polynomial) { - - // Let f ∈ { E(r₁, ..., X_j, d...) } ∪ {P_ul(r₁, ..., X_j, d...) }. 
It is linear in X_j, so f(m) = m×(f(1) - f(0)) + f(0), and f(0), f(1) are easily computed from the bookkeeping tables - EVal, EStep := computeValAndStep(c.eq, c.manager.memPool) - - puVal := make([]polynomial.MultiLin, len(c.inputPreprocessors)) //TODO: Make a two-dimensional array struct, and index it i-first rather than inputI first: would result in scanning memory access in the "d" loop and obviate the gateInput variable - puStep := make([]polynomial.MultiLin, len(c.inputPreprocessors)) //TODO, ctd: the greater degGJ, the more this would matter - - for i, puI := range c.inputPreprocessors { - puVal[i], puStep[i] = computeValAndStep(puI, c.manager.memPool) - } - - degGJ := 1 + c.wire.Gate.Degree() // guaranteed to be no smaller than the actual deg(g_j) - gJ = make([]fr.Element, degGJ) - - parallel := len(EVal) >= 1024 //TODO: Experiment with threshold - - var gateInput [][]fr.Element - - if parallel { - gateInput = [][]fr.Element{c.manager.memPool.Make(len(c.inputPreprocessors)), - c.manager.memPool.Make(len(c.inputPreprocessors))} - } else { - gateInput = [][]fr.Element{c.manager.memPool.Make(len(c.inputPreprocessors))} - } - - var wg sync.WaitGroup - - for d := 0; d < degGJ; d++ { - - notLastIteration := d+1 < degGJ - - sumOverI := func(res *fr.Element, gateInput []fr.Element, start, end int) { - for i := start; i < end; i++ { - - for inputI := range puVal { - gateInput[inputI].Set(&puVal[inputI][i]) - if notLastIteration { - puVal[inputI][i].Add(&puVal[inputI][i], &puStep[inputI][i]) - } - } - - // gJAtDI = gJ(d, i...) - gJAtDI := c.wire.Gate.Evaluate(gateInput...) - gJAtDI.Mul(&gJAtDI, &EVal[i]) - - res.Add(res, &gJAtDI) - - if notLastIteration { - EVal[i].Add(&EVal[i], &EStep[i]) - } - } - wg.Done() - } - - if parallel { - var firstHalf, secondHalf fr.Element - wg.Add(2) - go sumOverI(&secondHalf, gateInput[1], len(EVal)/2, len(EVal)) - go sumOverI(&firstHalf, gateInput[0], 0, len(EVal)/2) - wg.Wait() - gJ[d].Add(&firstHalf, &secondHalf) - } else { - wg.Add(1) // formalities - sumOverI(&gJ[d], gateInput[0], 0, len(EVal)) - } - } - - c.manager.memPool.Dump(gateInput...) 
- c.manager.memPool.Dump(EVal, EStep) - - for inputI := range puVal { - c.manager.memPool.Dump(puVal[inputI], puStep[inputI]) - } - - return -} - -// Next first folds the "preprocessing" and "eq" polynomials then compute the new g_j -func (c *eqTimesGateEvalSumcheckClaims) Next(element fr.Element) polynomial.Polynomial { - c.eq.Fold(element) - for i := 0; i < len(c.inputPreprocessors); i++ { - c.inputPreprocessors[i].Fold(element) - } - return c.computeGJ() -} - -func (c *eqTimesGateEvalSumcheckClaims) VarsNum() int { - return len(c.evaluationPoints[0]) -} - -func (c *eqTimesGateEvalSumcheckClaims) ClaimsNum() int { - return len(c.claimedEvaluations) -} - -func (c *eqTimesGateEvalSumcheckClaims) ProveFinalEval(r []fr.Element) interface{} { - - //defer the proof, return list of claims - evaluations := make([]fr.Element, 0, len(c.wire.Inputs)) - noMoreClaimsAllowed := make(map[*Wire]struct{}, len(c.inputPreprocessors)) - noMoreClaimsAllowed[c.wire] = struct{}{} - - for inI, in := range c.wire.Inputs { - puI := c.inputPreprocessors[inI] - if _, found := noMoreClaimsAllowed[in]; !found { - noMoreClaimsAllowed[in] = struct{}{} - puI.Fold(r[len(r)-1]) - c.manager.add(in, r, puI[0]) - evaluations = append(evaluations, puI[0]) - } - c.manager.memPool.Dump(puI) - } - - c.manager.memPool.Dump(c.claimedEvaluations, c.eq) - - return evaluations -} - -type claimsManager struct { - claimsMap map[*Wire]*eqTimesGateEvalSumcheckLazyClaims - assignment WireAssignment - memPool *polynomial.Pool -} - -func newClaimsManager(c Circuit, assignment WireAssignment, pool *polynomial.Pool) (claims claimsManager) { - claims.assignment = assignment - claims.claimsMap = make(map[*Wire]*eqTimesGateEvalSumcheckLazyClaims, len(c)) - claims.memPool = pool - - for i := range c { - wire := &c[i] - - claims.claimsMap[wire] = &eqTimesGateEvalSumcheckLazyClaims{ - wire: wire, - evaluationPoints: make([][]fr.Element, 0, wire.NbClaims()), - claimedEvaluations: claims.memPool.Make(wire.NbClaims()), - manager: &claims, - } - } - return -} - -func (m *claimsManager) add(wire *Wire, evaluationPoint []fr.Element, evaluation fr.Element) { - claim := m.claimsMap[wire] - i := len(claim.evaluationPoints) - claim.claimedEvaluations[i] = evaluation - claim.evaluationPoints = append(claim.evaluationPoints, evaluationPoint) -} - -func (m *claimsManager) getLazyClaim(wire *Wire) *eqTimesGateEvalSumcheckLazyClaims { - return m.claimsMap[wire] -} - -func (m *claimsManager) getClaim(wire *Wire) *eqTimesGateEvalSumcheckClaims { - lazy := m.claimsMap[wire] - res := &eqTimesGateEvalSumcheckClaims{ - wire: wire, - evaluationPoints: lazy.evaluationPoints, - claimedEvaluations: lazy.claimedEvaluations, - manager: m, - } - - if wire.IsInput() { - res.inputPreprocessors = []polynomial.MultiLin{m.memPool.Clone(m.assignment[wire])} - } else { - res.inputPreprocessors = make([]polynomial.MultiLin, len(wire.Inputs)) - - for inputI, inputW := range wire.Inputs { - res.inputPreprocessors[inputI] = m.memPool.Clone(m.assignment[inputW]) //will be edited later, so must be deep copied - } - } - return res -} - -func (m *claimsManager) deleteClaim(wire *Wire) { - delete(m.claimsMap, wire) -} - -type settings struct { - pool *polynomial.Pool - sorted []*Wire - transcript *fiatshamir.Transcript - transcriptPrefix string - nbVars int -} - -type Option func(*settings) - -func WithPool(pool *polynomial.Pool) Option { - return func(options *settings) { - options.pool = pool - } -} - -func WithSortedCircuit(sorted []*Wire) Option { - return func(options *settings) { - 
options.sorted = sorted - } -} - -func setup(c Circuit, assignment WireAssignment, transcriptSettings fiatshamir.Settings, options ...Option) (settings, error) { - var o settings - var err error - for _, option := range options { - option(&o) - } - - o.nbVars = assignment.NumVars() - nbInstances := assignment.NumInstances() - if 1< b { - return a - } - return b -} - -func ChallengeNames(sorted []*Wire, logNbInstances int, prefix string) []string { - - // Pre-compute the size TODO: Consider not doing this and just grow the list by appending - size := logNbInstances // first challenge - - for _, w := range sorted { - if w.noProof() { // no proof, no challenge - continue - } - if w.NbClaims() > 1 { //combine the claims - size++ - } - size += logNbInstances // full run of sumcheck on logNbInstances variables - } - - nums := make([]string, max(len(sorted), logNbInstances)) - for i := range nums { - nums[i] = strconv.Itoa(i) - } - - challenges := make([]string, size) - - // output wire claims - firstChallengePrefix := prefix + "fC." - for j := 0; j < logNbInstances; j++ { - challenges[j] = firstChallengePrefix + nums[j] - } - j := logNbInstances - for i := len(sorted) - 1; i >= 0; i-- { - if sorted[i].noProof() { - continue - } - wirePrefix := prefix + "w" + nums[i] + "." - - if sorted[i].NbClaims() > 1 { - challenges[j] = wirePrefix + "comb" - j++ - } - - partialSumPrefix := wirePrefix + "pSP." - for k := 0; k < logNbInstances; k++ { - challenges[j] = partialSumPrefix + nums[k] - j++ - } - } - return challenges -} - -func getFirstChallengeNames(logNbInstances int, prefix string) []string { - res := make([]string, logNbInstances) - firstChallengePrefix := prefix + "fC." - for i := 0; i < logNbInstances; i++ { - res[i] = firstChallengePrefix + strconv.Itoa(i) - } - return res -} - -func getChallenges(transcript *fiatshamir.Transcript, names []string) ([]fr.Element, error) { - res := make([]fr.Element, len(names)) - for i, name := range names { - if bytes, err := transcript.ComputeChallenge(name); err == nil { - res[i].SetBytes(bytes) - } else { - return nil, err - } - } - return res, nil -} - -// Prove consistency of the claimed assignment -func Prove(c Circuit, assignment WireAssignment, transcriptSettings fiatshamir.Settings, options ...Option) (Proof, error) { - o, err := setup(c, assignment, transcriptSettings, options...) 
- if err != nil { - return nil, err - } - - claims := newClaimsManager(c, assignment, o.pool) - - proof := make(Proof, len(c)) - // firstChallenge called rho in the paper - var firstChallenge []fr.Element - firstChallenge, err = getChallenges(o.transcript, getFirstChallengeNames(o.nbVars, o.transcriptPrefix)) - if err != nil { - return nil, err - } - - wirePrefix := o.transcriptPrefix + "w" - var baseChallenge [][]byte - for i := len(c) - 1; i >= 0; i-- { - - wire := o.sorted[i] - - if wire.IsOutput() { - claims.add(wire, firstChallenge, assignment[wire].Evaluate(firstChallenge, claims.memPool)) - } - - claim := claims.getClaim(wire) - if wire.noProof() { // input wires with one claim only - proof[i] = sumcheck.Proof{ - PartialSumPolys: []polynomial.Polynomial{}, - FinalEvalProof: []fr.Element{}, - } - } else { - if proof[i], err = sumcheck.Prove( - claim, fiatshamir.WithTranscript(o.transcript, wirePrefix+strconv.Itoa(i)+".", baseChallenge...), - ); err != nil { - return proof, err - } - - finalEvalProof := proof[i].FinalEvalProof.([]fr.Element) - baseChallenge = make([][]byte, len(finalEvalProof)) - for j := range finalEvalProof { - bytes := finalEvalProof[j].Bytes() - baseChallenge[j] = bytes[:] - } - } - // the verifier checks a single claim about input wires itself - claims.deleteClaim(wire) - } - - return proof, nil -} - -// Verify the consistency of the claimed output with the claimed input -// Unlike in Prove, the assignment argument need not be complete -func Verify(c Circuit, assignment WireAssignment, proof Proof, transcriptSettings fiatshamir.Settings, options ...Option) error { - o, err := setup(c, assignment, transcriptSettings, options...) - if err != nil { - return err - } - - claims := newClaimsManager(c, assignment, o.pool) - - var firstChallenge []fr.Element - firstChallenge, err = getChallenges(o.transcript, getFirstChallengeNames(o.nbVars, o.transcriptPrefix)) - if err != nil { - return err - } - - wirePrefix := o.transcriptPrefix + "w" - var baseChallenge [][]byte - for i := len(c) - 1; i >= 0; i-- { - wire := o.sorted[i] - - if wire.IsOutput() { - claims.add(wire, firstChallenge, assignment[wire].Evaluate(firstChallenge, claims.memPool)) - } - - proofW := proof[i] - finalEvalProof := proofW.FinalEvalProof.([]fr.Element) - claim := claims.getLazyClaim(wire) - if wire.noProof() { // input wires with one claim only - // make sure the proof is empty - if len(finalEvalProof) != 0 || len(proofW.PartialSumPolys) != 0 { - return fmt.Errorf("no proof allowed for input wire with a single claim") - } - - if wire.NbClaims() == 1 { // input wire - // simply evaluate and see if it matches - evaluation := assignment[wire].Evaluate(claim.evaluationPoints[0], claims.memPool) - if !claim.claimedEvaluations[0].Equal(&evaluation) { - return fmt.Errorf("incorrect input wire claim") - } - } - } else if err = sumcheck.Verify( - claim, proof[i], fiatshamir.WithTranscript(o.transcript, wirePrefix+strconv.Itoa(i)+".", baseChallenge...), - ); err == nil { - baseChallenge = make([][]byte, len(finalEvalProof)) - for j := range finalEvalProof { - bytes := finalEvalProof[j].Bytes() - baseChallenge[j] = bytes[:] - } - } else { - return fmt.Errorf("sumcheck proof rejected: %v", err) //TODO: Any polynomials to dump? - } - claims.deleteClaim(wire) - } - return nil -} - -type IdentityGate struct{} - -func (IdentityGate) Evaluate(input ...fr.Element) fr.Element { - return input[0] -} - -func (IdentityGate) Degree() int { - return 1 -} - -// outputsList also sets the nbUniqueOutputs fields. 
It also sets the wire metadata. -func outputsList(c Circuit, indexes map[*Wire]int) [][]int { - res := make([][]int, len(c)) - for i := range c { - res[i] = make([]int, 0) - c[i].nbUniqueOutputs = 0 - if c[i].IsInput() { - c[i].Gate = IdentityGate{} - } - } - ins := make(map[int]struct{}, len(c)) - for i := range c { - for k := range ins { // clear map - delete(ins, k) - } - for _, in := range c[i].Inputs { - inI := indexes[in] - res[inI] = append(res[inI], i) - if _, ok := ins[inI]; !ok { - in.nbUniqueOutputs++ - ins[inI] = struct{}{} - } - } - } - return res -} - -type topSortData struct { - outputs [][]int - status []int // status > 0 indicates number of inputs left to be ready. status = 0 means ready. status = -1 means done - index map[*Wire]int - leastReady int -} - -func (d *topSortData) markDone(i int) { - - d.status[i] = -1 - - for _, outI := range d.outputs[i] { - d.status[outI]-- - if d.status[outI] == 0 && outI < d.leastReady { - d.leastReady = outI - } - } - - for d.leastReady < len(d.status) && d.status[d.leastReady] != 0 { - d.leastReady++ - } -} - -func indexMap(c Circuit) map[*Wire]int { - res := make(map[*Wire]int, len(c)) - for i := range c { - res[&c[i]] = i - } - return res -} - -func statusList(c Circuit) []int { - res := make([]int, len(c)) - for i := range c { - res[i] = len(c[i].Inputs) - } - return res -} - -// topologicalSort sorts the wires in order of dependence. Such that for any wire, any one it depends on -// occurs before it. It tries to stick to the input order as much as possible. An already sorted list will remain unchanged. -// It also sets the nbOutput flags, and a dummy IdentityGate for input wires. -// Worst-case inefficient O(n^2), but that probably won't matter since the circuits are small. -// Furthermore, it is efficient with already-close-to-sorted lists, which are the expected input -func topologicalSort(c Circuit) []*Wire { - var data topSortData - data.index = indexMap(c) - data.outputs = outputsList(c, data.index) - data.status = statusList(c) - sorted := make([]*Wire, len(c)) - - for data.leastReady = 0; data.status[data.leastReady] != 0; data.leastReady++ { - } - - for i := range c { - sorted[i] = &c[data.leastReady] - data.markDone(data.leastReady) - } - - return sorted -} - -// Complete the circuit evaluation from input values -func (a WireAssignment) Complete(c Circuit) WireAssignment { - - sortedWires := topologicalSort(c) - - numEvaluations := 0 - - for _, w := range sortedWires { - if !w.IsInput() { - if numEvaluations == 0 { - numEvaluations = len(a[w.Inputs[0]]) - } - evals := make([]fr.Element, numEvaluations) - ins := make([]fr.Element, len(w.Inputs)) - for k := 0; k < numEvaluations; k++ { - for inI, in := range w.Inputs { - ins[inI] = a[in][k] - } - evals[k] = w.Gate.Evaluate(ins...) - } - a[w] = evals - } - } - return a -} - -func (a WireAssignment) NumInstances() int { - for _, aW := range a { - return len(aW) - } - panic("empty assignment") -} - -func (a WireAssignment) NumVars() int { - for _, aW := range a { - return aW.NumVars() - } - panic("empty assignment") -} diff --git a/tools/gnark/bls12_381/fr/gkr/gkr_test.go b/tools/gnark/bls12_381/fr/gkr/gkr_test.go deleted file mode 100644 index 2dbc0e90..00000000 --- a/tools/gnark/bls12_381/fr/gkr/gkr_test.go +++ /dev/null @@ -1,722 +0,0 @@ -// Copyright 2020 ConsenSys Software Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by consensys/gnark-crypto DO NOT EDIT - -package gkr - -import ( - "encoding/json" - "fmt" - "github.com/consensys/gnark-crypto/ecc/bls12-381/fr" - "github.com/consensys/gnark-crypto/ecc/bls12-381/fr/mimc" - "github.com/consensys/gnark-crypto/ecc/bls12-381/fr/polynomial" - "github.com/consensys/gnark-crypto/ecc/bls12-381/fr/sumcheck" - "github.com/consensys/gnark-crypto/ecc/bls12-381/fr/test_vector_utils" - fiatshamir "github.com/consensys/gnark-crypto/fiat-shamir" - "github.com/stretchr/testify/assert" - "os" - "path/filepath" - "reflect" - "strconv" - "testing" -) - -func TestNoGateTwoInstances(t *testing.T) { - // Testing a single instance is not possible because the sumcheck implementation doesn't cover the trivial 0-variate case - testNoGate(t, []fr.Element{four, three}) -} - -func TestNoGate(t *testing.T) { - testManyInstances(t, 1, testNoGate) -} - -func TestSingleMulGateTwoInstances(t *testing.T) { - testSingleMulGate(t, []fr.Element{four, three}, []fr.Element{two, three}) -} - -func TestSingleMulGate(t *testing.T) { - testManyInstances(t, 2, testSingleMulGate) -} - -func TestSingleInputTwoIdentityGatesTwoInstances(t *testing.T) { - - testSingleInputTwoIdentityGates(t, []fr.Element{two, three}) -} - -func TestSingleInputTwoIdentityGates(t *testing.T) { - - testManyInstances(t, 2, testSingleInputTwoIdentityGates) -} - -func TestSingleInputTwoIdentityGatesComposedTwoInstances(t *testing.T) { - testSingleInputTwoIdentityGatesComposed(t, []fr.Element{two, one}) -} - -func TestSingleInputTwoIdentityGatesComposed(t *testing.T) { - testManyInstances(t, 1, testSingleInputTwoIdentityGatesComposed) -} - -func TestSingleMimcCipherGateTwoInstances(t *testing.T) { - testSingleMimcCipherGate(t, []fr.Element{one, one}, []fr.Element{one, two}) -} - -func TestSingleMimcCipherGate(t *testing.T) { - testManyInstances(t, 2, testSingleMimcCipherGate) -} - -func TestATimesBSquaredTwoInstances(t *testing.T) { - testATimesBSquared(t, 2, []fr.Element{one, one}, []fr.Element{one, two}) -} - -func TestShallowMimcTwoInstances(t *testing.T) { - testMimc(t, 2, []fr.Element{one, one}, []fr.Element{one, two}) -} -func TestMimcTwoInstances(t *testing.T) { - testMimc(t, 93, []fr.Element{one, one}, []fr.Element{one, two}) -} - -func TestMimc(t *testing.T) { - testManyInstances(t, 2, generateTestMimc(93)) -} - -func generateTestMimc(numRounds int) func(*testing.T, ...[]fr.Element) { - return func(t *testing.T, inputAssignments ...[]fr.Element) { - testMimc(t, numRounds, inputAssignments...) 
- } -} - -func TestSumcheckFromSingleInputTwoIdentityGatesGateTwoInstances(t *testing.T) { - circuit := Circuit{Wire{ - Gate: IdentityGate{}, - Inputs: []*Wire{}, - nbUniqueOutputs: 2, - }} - - wire := &circuit[0] - - assignment := WireAssignment{&circuit[0]: []fr.Element{two, three}} - pool := polynomial.NewPool(256, 1<<11) - - claimsManagerGen := func() *claimsManager { - manager := newClaimsManager(circuit, assignment, &pool) - manager.add(wire, []fr.Element{three}, five) - manager.add(wire, []fr.Element{four}, six) - return &manager - } - - transcriptGen := test_vector_utils.NewMessageCounterGenerator(4, 1) - - proof, err := sumcheck.Prove(claimsManagerGen().getClaim(wire), fiatshamir.WithHash(transcriptGen(), nil)) - assert.NoError(t, err) - err = sumcheck.Verify(claimsManagerGen().getLazyClaim(wire), proof, fiatshamir.WithHash(transcriptGen(), nil)) - assert.NoError(t, err) -} - -var one, two, three, four, five, six fr.Element - -func init() { - one.SetOne() - two.Double(&one) - three.Add(&two, &one) - four.Double(&two) - five.Add(&three, &two) - six.Double(&three) -} - -var testManyInstancesLogMaxInstances = -1 - -func getLogMaxInstances(t *testing.T) int { - if testManyInstancesLogMaxInstances == -1 { - - s := os.Getenv("GKR_LOG_INSTANCES") - if s == "" { - testManyInstancesLogMaxInstances = 5 - } else { - var err error - testManyInstancesLogMaxInstances, err = strconv.Atoi(s) - if err != nil { - t.Error(err) - } - } - - } - return testManyInstancesLogMaxInstances -} - -func testManyInstances(t *testing.T, numInput int, test func(*testing.T, ...[]fr.Element)) { - fullAssignments := make([][]fr.Element, numInput) - maxSize := 1 << getLogMaxInstances(t) - - t.Log("Entered test orchestrator, assigning and randomizing inputs") - - for i := range fullAssignments { - fullAssignments[i] = make([]fr.Element, maxSize) - setRandom(fullAssignments[i]) - } - - inputAssignments := make([][]fr.Element, numInput) - for numEvals := maxSize; numEvals <= maxSize; numEvals *= 2 { - for i, fullAssignment := range fullAssignments { - inputAssignments[i] = fullAssignment[:numEvals] - } - - t.Log("Selected inputs for test") - test(t, inputAssignments...) 
- } -} - -func testNoGate(t *testing.T, inputAssignments ...[]fr.Element) { - c := Circuit{ - { - Inputs: []*Wire{}, - Gate: nil, - }, - } - - assignment := WireAssignment{&c[0]: inputAssignments[0]} - - proof, err := Prove(c, assignment, fiatshamir.WithHash(test_vector_utils.NewMessageCounter(1, 1))) - assert.NoError(t, err) - - // Even though a hash is called here, the proof is empty - - err = Verify(c, assignment, proof, fiatshamir.WithHash(test_vector_utils.NewMessageCounter(1, 1))) - assert.NoError(t, err, "proof rejected") -} - -func testSingleMulGate(t *testing.T, inputAssignments ...[]fr.Element) { - - c := make(Circuit, 3) - c[2] = Wire{ - Gate: mulGate{}, - Inputs: []*Wire{&c[0], &c[1]}, - } - - assignment := WireAssignment{&c[0]: inputAssignments[0], &c[1]: inputAssignments[1]}.Complete(c) - - proof, err := Prove(c, assignment, fiatshamir.WithHash(test_vector_utils.NewMessageCounter(1, 1))) - assert.NoError(t, err) - - err = Verify(c, assignment, proof, fiatshamir.WithHash(test_vector_utils.NewMessageCounter(1, 1))) - assert.NoError(t, err, "proof rejected") - - err = Verify(c, assignment, proof, fiatshamir.WithHash(test_vector_utils.NewMessageCounter(0, 1))) - assert.NotNil(t, err, "bad proof accepted") -} - -func testSingleInputTwoIdentityGates(t *testing.T, inputAssignments ...[]fr.Element) { - c := make(Circuit, 3) - - c[1] = Wire{ - Gate: IdentityGate{}, - Inputs: []*Wire{&c[0]}, - } - - c[2] = Wire{ - Gate: IdentityGate{}, - Inputs: []*Wire{&c[0]}, - } - - assignment := WireAssignment{&c[0]: inputAssignments[0]}.Complete(c) - - proof, err := Prove(c, assignment, fiatshamir.WithHash(test_vector_utils.NewMessageCounter(0, 1))) - assert.NoError(t, err) - - err = Verify(c, assignment, proof, fiatshamir.WithHash(test_vector_utils.NewMessageCounter(0, 1))) - assert.NoError(t, err, "proof rejected") - - err = Verify(c, assignment, proof, fiatshamir.WithHash(test_vector_utils.NewMessageCounter(1, 1))) - assert.NotNil(t, err, "bad proof accepted") -} - -func testSingleMimcCipherGate(t *testing.T, inputAssignments ...[]fr.Element) { - c := make(Circuit, 3) - - c[2] = Wire{ - Gate: mimcCipherGate{}, - Inputs: []*Wire{&c[0], &c[1]}, - } - - t.Log("Evaluating all circuit wires") - assignment := WireAssignment{&c[0]: inputAssignments[0], &c[1]: inputAssignments[1]}.Complete(c) - t.Log("Circuit evaluation complete") - proof, err := Prove(c, assignment, fiatshamir.WithHash(test_vector_utils.NewMessageCounter(0, 1))) - assert.NoError(t, err) - t.Log("Proof complete") - err = Verify(c, assignment, proof, fiatshamir.WithHash(test_vector_utils.NewMessageCounter(0, 1))) - assert.NoError(t, err, "proof rejected") - - t.Log("Successful verification complete") - err = Verify(c, assignment, proof, fiatshamir.WithHash(test_vector_utils.NewMessageCounter(1, 1))) - assert.NotNil(t, err, "bad proof accepted") - t.Log("Unsuccessful verification complete") -} - -func testSingleInputTwoIdentityGatesComposed(t *testing.T, inputAssignments ...[]fr.Element) { - c := make(Circuit, 3) - - c[1] = Wire{ - Gate: IdentityGate{}, - Inputs: []*Wire{&c[0]}, - } - c[2] = Wire{ - Gate: IdentityGate{}, - Inputs: []*Wire{&c[1]}, - } - - assignment := WireAssignment{&c[0]: inputAssignments[0]}.Complete(c) - - proof, err := Prove(c, assignment, fiatshamir.WithHash(test_vector_utils.NewMessageCounter(0, 1))) - assert.NoError(t, err) - - err = Verify(c, assignment, proof, fiatshamir.WithHash(test_vector_utils.NewMessageCounter(0, 1))) - assert.NoError(t, err, "proof rejected") - - err = Verify(c, assignment, proof, 
fiatshamir.WithHash(test_vector_utils.NewMessageCounter(1, 1))) - assert.NotNil(t, err, "bad proof accepted") -} - -func mimcCircuit(numRounds int) Circuit { - c := make(Circuit, numRounds+2) - - for i := 2; i < len(c); i++ { - c[i] = Wire{ - Gate: mimcCipherGate{}, - Inputs: []*Wire{&c[i-1], &c[0]}, - } - } - return c -} - -func testMimc(t *testing.T, numRounds int, inputAssignments ...[]fr.Element) { - //TODO: Implement mimc correctly. Currently, the computation is mimc(a,b) = cipher( cipher( ... cipher(a, b), b) ..., b) - // @AlexandreBelling: Please explain the extra layers in https://github.com/ConsenSys/gkr-mimc/blob/81eada039ab4ed403b7726b535adb63026e8011f/examples/mimc.go#L10 - - c := mimcCircuit(numRounds) - - t.Log("Evaluating all circuit wires") - assignment := WireAssignment{&c[0]: inputAssignments[0], &c[1]: inputAssignments[1]}.Complete(c) - t.Log("Circuit evaluation complete") - - proof, err := Prove(c, assignment, fiatshamir.WithHash(test_vector_utils.NewMessageCounter(0, 1))) - assert.NoError(t, err) - - t.Log("Proof finished") - err = Verify(c, assignment, proof, fiatshamir.WithHash(test_vector_utils.NewMessageCounter(0, 1))) - assert.NoError(t, err, "proof rejected") - - t.Log("Successful verification finished") - err = Verify(c, assignment, proof, fiatshamir.WithHash(test_vector_utils.NewMessageCounter(1, 1))) - assert.NotNil(t, err, "bad proof accepted") - t.Log("Unsuccessful verification finished") -} - -func testATimesBSquared(t *testing.T, numRounds int, inputAssignments ...[]fr.Element) { - // This imitates the MiMC circuit - - c := make(Circuit, numRounds+2) - - for i := 2; i < len(c); i++ { - c[i] = Wire{ - Gate: mulGate{}, - Inputs: []*Wire{&c[i-1], &c[0]}, - } - } - - assignment := WireAssignment{&c[0]: inputAssignments[0], &c[1]: inputAssignments[1]}.Complete(c) - - proof, err := Prove(c, assignment, fiatshamir.WithHash(test_vector_utils.NewMessageCounter(0, 1))) - assert.NoError(t, err) - - err = Verify(c, assignment, proof, fiatshamir.WithHash(test_vector_utils.NewMessageCounter(0, 1))) - assert.NoError(t, err, "proof rejected") - - err = Verify(c, assignment, proof, fiatshamir.WithHash(test_vector_utils.NewMessageCounter(1, 1))) - assert.NotNil(t, err, "bad proof accepted") -} - -func setRandom(slice []fr.Element) { - for i := range slice { - slice[i].SetRandom() - } -} - -func generateTestProver(path string) func(t *testing.T) { - return func(t *testing.T) { - testCase, err := newTestCase(path) - assert.NoError(t, err) - proof, err := Prove(testCase.Circuit, testCase.FullAssignment, testCase.transcriptSetting()) - assert.NoError(t, err) - assert.NoError(t, proofEquals(testCase.Proof, proof)) - } -} - -func generateTestVerifier(path string) func(t *testing.T) { - return func(t *testing.T) { - testCase, err := newTestCase(path) - assert.NoError(t, err) - err = Verify(testCase.Circuit, testCase.InOutAssignment, testCase.Proof, testCase.transcriptSetting()) - assert.NoError(t, err, "proof rejected") - testCase, err = newTestCase(path) - assert.NoError(t, err) - err = Verify(testCase.Circuit, testCase.InOutAssignment, testCase.Proof, fiatshamir.WithHash(&test_vector_utils.MapHash{Map: testCase.Hash}, []byte{1})) - assert.NotNil(t, err, "bad proof accepted") - } -} - -func TestGkrVectors(t *testing.T) { - - testDirPath := "../../../../internal/generator/gkr/test_vectors" - dirEntries, err := os.ReadDir(testDirPath) - assert.NoError(t, err) - for _, dirEntry := range dirEntries { - if !dirEntry.IsDir() { - - if filepath.Ext(dirEntry.Name()) == ".json" { - path := 
filepath.Join(testDirPath, dirEntry.Name()) - noExt := dirEntry.Name()[:len(dirEntry.Name())-len(".json")] - - t.Run(noExt+"_prover", generateTestProver(path)) - t.Run(noExt+"_verifier", generateTestVerifier(path)) - - } - } - } -} - -func proofEquals(expected Proof, seen Proof) error { - if len(expected) != len(seen) { - return fmt.Errorf("length mismatch %d ≠ %d", len(expected), len(seen)) - } - for i, x := range expected { - xSeen := seen[i] - - if xSeen.FinalEvalProof == nil { - if seenFinalEval := x.FinalEvalProof.([]fr.Element); len(seenFinalEval) != 0 { - return fmt.Errorf("length mismatch %d ≠ %d", 0, len(seenFinalEval)) - } - } else { - if err := test_vector_utils.SliceEquals(x.FinalEvalProof.([]fr.Element), xSeen.FinalEvalProof.([]fr.Element)); err != nil { - return fmt.Errorf("final evaluation proof mismatch") - } - } - if err := test_vector_utils.PolynomialSliceEquals(x.PartialSumPolys, xSeen.PartialSumPolys); err != nil { - return err - } - } - return nil -} - -func BenchmarkGkrMimc(b *testing.B) { - const N = 1 << 19 - fmt.Println("creating circuit structure") - c := mimcCircuit(91) - - in0 := make([]fr.Element, N) - in1 := make([]fr.Element, N) - setRandom(in0) - setRandom(in1) - - fmt.Println("evaluating circuit") - assignment := WireAssignment{&c[0]: in0, &c[1]: in1}.Complete(c) - - //b.ResetTimer() - fmt.Println("constructing proof") - Prove(c, assignment, fiatshamir.WithHash(mimc.NewMiMC())) -} - -func TestTopSortTrivial(t *testing.T) { - c := make(Circuit, 2) - c[0].Inputs = []*Wire{&c[1]} - sorted := topologicalSort(c) - assert.Equal(t, []*Wire{&c[1], &c[0]}, sorted) -} - -func TestTopSortDeep(t *testing.T) { - c := make(Circuit, 4) - c[0].Inputs = []*Wire{&c[2]} - c[1].Inputs = []*Wire{&c[3]} - c[2].Inputs = []*Wire{} - c[3].Inputs = []*Wire{&c[0]} - sorted := topologicalSort(c) - assert.Equal(t, []*Wire{&c[2], &c[0], &c[3], &c[1]}, sorted) -} - -func TestTopSortWide(t *testing.T) { - c := make(Circuit, 10) - c[0].Inputs = []*Wire{&c[3], &c[8]} - c[1].Inputs = []*Wire{&c[6]} - c[2].Inputs = []*Wire{&c[4]} - c[3].Inputs = []*Wire{} - c[4].Inputs = []*Wire{} - c[5].Inputs = []*Wire{&c[9]} - c[6].Inputs = []*Wire{&c[9]} - c[7].Inputs = []*Wire{&c[9], &c[5], &c[2]} - c[8].Inputs = []*Wire{&c[4], &c[3]} - c[9].Inputs = []*Wire{} - - sorted := topologicalSort(c) - sortedExpected := []*Wire{&c[3], &c[4], &c[2], &c[8], &c[0], &c[9], &c[5], &c[6], &c[1], &c[7]} - - assert.Equal(t, sortedExpected, sorted) -} - -type WireInfo struct { - Gate string `json:"gate"` - Inputs []int `json:"inputs"` -} - -type CircuitInfo []WireInfo - -var circuitCache = make(map[string]Circuit) - -func getCircuit(path string) (Circuit, error) { - path, err := filepath.Abs(path) - if err != nil { - return nil, err - } - if circuit, ok := circuitCache[path]; ok { - return circuit, nil - } - var bytes []byte - if bytes, err = os.ReadFile(path); err == nil { - var circuitInfo CircuitInfo - if err = json.Unmarshal(bytes, &circuitInfo); err == nil { - circuit := circuitInfo.toCircuit() - circuitCache[path] = circuit - return circuit, nil - } else { - return nil, err - } - } else { - return nil, err - } -} - -func (c CircuitInfo) toCircuit() (circuit Circuit) { - circuit = make(Circuit, len(c)) - for i := range c { - circuit[i].Gate = gates[c[i].Gate] - circuit[i].Inputs = make([]*Wire, len(c[i].Inputs)) - for k, inputCoord := range c[i].Inputs { - input := &circuit[inputCoord] - circuit[i].Inputs[k] = input - } - } - return -} - -var gates map[string]Gate - -func init() { - gates = make(map[string]Gate) - 
gates["identity"] = IdentityGate{} - gates["mul"] = mulGate{} - gates["mimc"] = mimcCipherGate{} //TODO: Add ark - gates["select-input-3"] = _select(2) -} - -type mimcCipherGate struct { - ark fr.Element -} - -func (m mimcCipherGate) Evaluate(input ...fr.Element) (res fr.Element) { - var sum fr.Element - - sum. - Add(&input[0], &input[1]). - Add(&sum, &m.ark) - - res.Square(&sum) // sum^2 - res.Mul(&res, &sum) // sum^3 - res.Square(&res) //sum^6 - res.Mul(&res, &sum) //sum^7 - - return -} - -func (m mimcCipherGate) Degree() int { - return 7 -} - -type PrintableProof []PrintableSumcheckProof - -type PrintableSumcheckProof struct { - FinalEvalProof interface{} `json:"finalEvalProof"` - PartialSumPolys [][]interface{} `json:"partialSumPolys"` -} - -func unmarshalProof(printable PrintableProof) (Proof, error) { - proof := make(Proof, len(printable)) - for i := range printable { - finalEvalProof := []fr.Element(nil) - - if printable[i].FinalEvalProof != nil { - finalEvalSlice := reflect.ValueOf(printable[i].FinalEvalProof) - finalEvalProof = make([]fr.Element, finalEvalSlice.Len()) - for k := range finalEvalProof { - if _, err := test_vector_utils.SetElement(&finalEvalProof[k], finalEvalSlice.Index(k).Interface()); err != nil { - return nil, err - } - } - } - - proof[i] = sumcheck.Proof{ - PartialSumPolys: make([]polynomial.Polynomial, len(printable[i].PartialSumPolys)), - FinalEvalProof: finalEvalProof, - } - for k := range printable[i].PartialSumPolys { - var err error - if proof[i].PartialSumPolys[k], err = test_vector_utils.SliceToElementSlice(printable[i].PartialSumPolys[k]); err != nil { - return nil, err - } - } - } - return proof, nil -} - -type TestCase struct { - Circuit Circuit - Hash *test_vector_utils.ElementMap - Proof Proof - FullAssignment WireAssignment - InOutAssignment WireAssignment -} - -type TestCaseInfo struct { - Hash string `json:"hash"` - Circuit string `json:"circuit"` - Input [][]interface{} `json:"input"` - Output [][]interface{} `json:"output"` - Proof PrintableProof `json:"proof"` -} - -var testCases = make(map[string]*TestCase) - -func newTestCase(path string) (*TestCase, error) { - path, err := filepath.Abs(path) - if err != nil { - return nil, err - } - dir := filepath.Dir(path) - - tCase, ok := testCases[path] - if !ok { - var bytes []byte - if bytes, err = os.ReadFile(path); err == nil { - var info TestCaseInfo - err = json.Unmarshal(bytes, &info) - if err != nil { - return nil, err - } - - var circuit Circuit - if circuit, err = getCircuit(filepath.Join(dir, info.Circuit)); err != nil { - return nil, err - } - var _hash *test_vector_utils.ElementMap - if _hash, err = test_vector_utils.ElementMapFromFile(filepath.Join(dir, info.Hash)); err != nil { - return nil, err - } - var proof Proof - if proof, err = unmarshalProof(info.Proof); err != nil { - return nil, err - } - - fullAssignment := make(WireAssignment) - inOutAssignment := make(WireAssignment) - - sorted := topologicalSort(circuit) - - inI, outI := 0, 0 - for _, w := range sorted { - var assignmentRaw []interface{} - if w.IsInput() { - if inI == len(info.Input) { - return nil, fmt.Errorf("fewer input in vector than in circuit") - } - assignmentRaw = info.Input[inI] - inI++ - } else if w.IsOutput() { - if outI == len(info.Output) { - return nil, fmt.Errorf("fewer output in vector than in circuit") - } - assignmentRaw = info.Output[outI] - outI++ - } - if assignmentRaw != nil { - var wireAssignment []fr.Element - if wireAssignment, err = test_vector_utils.SliceToElementSlice(assignmentRaw); err != nil { 
- return nil, err - } - - fullAssignment[w] = wireAssignment - inOutAssignment[w] = wireAssignment - } - } - - fullAssignment.Complete(circuit) - - for _, w := range sorted { - if w.IsOutput() { - - if err = test_vector_utils.SliceEquals(inOutAssignment[w], fullAssignment[w]); err != nil { - return nil, fmt.Errorf("assignment mismatch: %v", err) - } - - } - } - - tCase = &TestCase{ - FullAssignment: fullAssignment, - InOutAssignment: inOutAssignment, - Proof: proof, - Hash: _hash, - Circuit: circuit, - } - - testCases[path] = tCase - } else { - return nil, err - } - } - - return tCase, nil -} - -func (c *TestCase) transcriptSetting(initialChallenge ...[]byte) fiatshamir.Settings { - return fiatshamir.WithHash(&test_vector_utils.MapHash{Map: c.Hash}, initialChallenge...) -} - -type mulGate struct{} - -func (g mulGate) Evaluate(element ...fr.Element) (result fr.Element) { - result.Mul(&element[0], &element[1]) - return -} - -func (g mulGate) Degree() int { - return 2 -} - -type _select int - -func (g _select) Evaluate(in ...fr.Element) fr.Element { - return in[g] -} - -func (g _select) Degree() int { - return 1 -} diff --git a/tools/gnark/bls12_381/fr/kzg/doc.go b/tools/gnark/bls12_381/fr/kzg/doc.go deleted file mode 100644 index d8a77e8f..00000000 --- a/tools/gnark/bls12_381/fr/kzg/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2020 ConsenSys Software Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by consensys/gnark-crypto DO NOT EDIT - -// Package kzg provides a KZG commitment scheme. -package kzg diff --git a/tools/gnark/bls12_381/fr/kzg/kzg.go b/tools/gnark/bls12_381/fr/kzg/kzg.go deleted file mode 100644 index e7e4d3d3..00000000 --- a/tools/gnark/bls12_381/fr/kzg/kzg.go +++ /dev/null @@ -1,528 +0,0 @@ -// Copyright 2020 ConsenSys Software Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -// Code generated by consensys/gnark-crypto DO NOT EDIT - -package kzg - -import ( - "errors" - "hash" - "math/big" - "sync" - - "github.com/consensys/gnark-crypto/ecc" - "github.com/consensys/gnark-crypto/ecc/bls12-381" - "github.com/consensys/gnark-crypto/ecc/bls12-381/fr" - "github.com/consensys/gnark-crypto/fiat-shamir" -) - -var ( - ErrInvalidNbDigests = errors.New("number of digests is not the same as the number of polynomials") - ErrInvalidPolynomialSize = errors.New("invalid polynomial size (larger than SRS or == 0)") - ErrVerifyOpeningProof = errors.New("can't verify opening proof") - ErrVerifyBatchOpeningSinglePoint = errors.New("can't verify batch opening proof at single point") - ErrMinSRSSize = errors.New("minimum srs size is 2") -) - -// Digest commitment of a polynomial. -type Digest = bls12381.G1Affine - -// SRS stores the result of the MPC -type SRS struct { - G1 []bls12381.G1Affine // [G₁ [α]G₁ , [α²]G₁, ... ] - G2 [2]bls12381.G2Affine // [G₂, [α]G₂ ] -} - -// eval returns p(point) where p is interpreted as a polynomial -// ∑_{i= 0; i-- { - res.Mul(&res, &point).Add(&res, &p[i]) - } - return res -} - -// NewSRS returns a new SRS using alpha as randomness source -// -// In production, a SRS generated through MPC should be used. -// -// implements io.ReaderFrom and io.WriterTo -func NewSRS(size uint64, bAlpha *big.Int) (*SRS, error) { - - if size < 2 { - return nil, ErrMinSRSSize - } - - var srs SRS - srs.G1 = make([]bls12381.G1Affine, size) - - var alpha fr.Element - alpha.SetBigInt(bAlpha) - - _, _, gen1Aff, gen2Aff := bls12381.Generators() - srs.G1[0] = gen1Aff - srs.G2[0] = gen2Aff - srs.G2[1].ScalarMultiplication(&gen2Aff, bAlpha) - - alphas := make([]fr.Element, size-1) - alphas[0] = alpha - for i := 1; i < len(alphas); i++ { - alphas[i].Mul(&alphas[i-1], &alpha) - } - g1s := bls12381.BatchScalarMultiplicationG1(&gen1Aff, alphas) - copy(srs.G1[1:], g1s) - - return &srs, nil -} - -// OpeningProof KZG proof for opening at a single point. -// -// implements io.ReaderFrom and io.WriterTo -type OpeningProof struct { - // H quotient polynomial (f - f(z))/(x-z) - H bls12381.G1Affine - - // ClaimedValue purported value - ClaimedValue fr.Element -} - -// BatchOpeningProof opening proof for many polynomials at the same point -// -// implements io.ReaderFrom and io.WriterTo -type BatchOpeningProof struct { - // H quotient polynomial Sum_i gamma**i*(f - f(z))/(x-z) - H bls12381.G1Affine - - // ClaimedValues purported values - ClaimedValues []fr.Element -} - -// Commit commits to a polynomial using a multi exponentiation with the SRS. -// It is assumed that the polynomial is in canonical form, in Montgomery form. -func Commit(p []fr.Element, srs *SRS, nbTasks ...int) (Digest, error) { - - if len(p) == 0 || len(p) > len(srs.G1) { - return Digest{}, ErrInvalidPolynomialSize - } - - var res bls12381.G1Affine - - config := ecc.MultiExpConfig{} - if len(nbTasks) > 0 { - config.NbTasks = nbTasks[0] - } - if _, err := res.MultiExp(srs.G1[:len(p)], p, config); err != nil { - return Digest{}, err - } - - return res, nil -} - -// Open computes an opening proof of polynomial p at given point. 
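Two things worth keeping in mind at this point: eval is plain Horner evaluation, and Commit is a multi-exponentiation of the coefficients against the SRS powers [αⁱ]G₁, so the digest is effectively [p(α)]G₁. A standalone toy sketch of that algebra over a small prime, with the group deliberately replaced by field multiplication (this loses all hiding and is purely illustrative; the modulus, alpha and names are made up):

package main

import "fmt"

const q = 2147483647 // toy prime modulus (2^31 - 1), illustration only

// evalHorner computes p(x) = sum_i p[i]*x^i mod q back to front, like eval.
func evalHorner(p []uint64, x uint64) uint64 {
	var res uint64
	for i := len(p) - 1; i >= 0; i-- {
		res = (res*x + p[i]) % q
	}
	return res
}

func main() {
	const alpha = 123456789   // stand-in for the secret SRS scalar
	p := []uint64{5, 0, 7, 3} // p(X) = 5 + 7X^2 + 3X^3

	// "Commit": sum_i p[i]*alpha^i, the toy analogue of the MSM against [alpha^i]G1.
	var commit, pow uint64 = 0, 1
	for _, pi := range p {
		commit = (commit + pi*pow) % q
		pow = pow * alpha % q
	}

	// The MSM-style sum is exactly p evaluated at the secret point.
	fmt.Println(commit == evalHorner(p, alpha)) // true
}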
-// fft.Domain Cardinality must be larger than p.Degree() -func Open(p []fr.Element, point fr.Element, srs *SRS) (OpeningProof, error) { - if len(p) == 0 || len(p) > len(srs.G1) { - return OpeningProof{}, ErrInvalidPolynomialSize - } - - // build the proof - res := OpeningProof{ - ClaimedValue: eval(p, point), - } - - // compute H - _p := make([]fr.Element, len(p)) - copy(_p, p) - h := dividePolyByXminusA(_p, res.ClaimedValue, point) - - _p = nil // h re-use this memory - - // commit to H - hCommit, err := Commit(h, srs) - if err != nil { - return OpeningProof{}, err - } - res.H.Set(&hCommit) - - return res, nil -} - -// Verify verifies a KZG opening proof at a single point -func Verify(commitment *Digest, proof *OpeningProof, point fr.Element, srs *SRS) error { - - // [f(a)]G₁ - var claimedValueG1Aff bls12381.G1Jac - var claimedValueBigInt big.Int - proof.ClaimedValue.ToBigIntRegular(&claimedValueBigInt) - claimedValueG1Aff.ScalarMultiplicationAffine(&srs.G1[0], &claimedValueBigInt) - - // [f(α) - f(a)]G₁ - var fminusfaG1Jac bls12381.G1Jac - fminusfaG1Jac.FromAffine(commitment) - fminusfaG1Jac.SubAssign(&claimedValueG1Aff) - - // [-H(α)]G₁ - var negH bls12381.G1Affine - negH.Neg(&proof.H) - - // [α-a]G₂ - var alphaMinusaG2Jac, genG2Jac, alphaG2Jac bls12381.G2Jac - var pointBigInt big.Int - point.ToBigIntRegular(&pointBigInt) - genG2Jac.FromAffine(&srs.G2[0]) - alphaG2Jac.FromAffine(&srs.G2[1]) - alphaMinusaG2Jac.ScalarMultiplication(&genG2Jac, &pointBigInt). - Neg(&alphaMinusaG2Jac). - AddAssign(&alphaG2Jac) - - // [α-a]G₂ - var xminusaG2Aff bls12381.G2Affine - xminusaG2Aff.FromJacobian(&alphaMinusaG2Jac) - - // [f(α) - f(a)]G₁ - var fminusfaG1Aff bls12381.G1Affine - fminusfaG1Aff.FromJacobian(&fminusfaG1Jac) - - // e([f(α) - f(a)]G₁, G₂).e([-H(α)]G₁, [α-a]G₂) ==? 1 - check, err := bls12381.PairingCheck( - []bls12381.G1Affine{fminusfaG1Aff, negH}, - []bls12381.G2Affine{srs.G2[0], xminusaG2Aff}, - ) - if err != nil { - return err - } - if !check { - return ErrVerifyOpeningProof - } - return nil -} - -// BatchOpenSinglePoint creates a batch opening proof at point of a list of polynomials. -// It's an interactive protocol, made non interactive using Fiat Shamir. -// -// * point is the point at which the polynomials are opened. -// * digests is the list of committed polynomials to open, need to derive the challenge using Fiat Shamir. -// * polynomials is the list of polynomials to open, they are supposed to be of the same size. 
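The batching idea used next: given a challenge γ, the prover opens the single folded polynomial ∑ᵢ γⁱfᵢ at the point instead of each fᵢ separately, and the claimed values are folded with the same weights. A standalone toy over a small prime showing that the two foldings agree (all constants are illustrative):

package main

import "fmt"

const q = 2147483647 // toy prime, illustration only

func eval(p []uint64, x uint64) uint64 {
	var res uint64
	for i := len(p) - 1; i >= 0; i-- {
		res = (res*x + p[i]) % q
	}
	return res
}

func main() {
	polys := [][]uint64{
		{1, 2, 3},
		{7, 0, 5, 11},
		{4, 9},
	}
	const a, gamma = 1000003, 987654321 // opening point and challenge

	folded := make([]uint64, 4) // size of the largest polynomial
	var foldedClaim uint64
	var acc uint64 = 1 // gamma^i
	for _, p := range polys {
		// foldedPolynomial += gamma^i * f_i, coefficient by coefficient.
		for j, pj := range p {
			folded[j] = (folded[j] + acc*pj%q) % q
		}
		// foldedClaim += gamma^i * f_i(a).
		foldedClaim = (foldedClaim + acc*eval(p, a)%q) % q
		acc = acc * gamma % q
	}

	// Opening the folded polynomial at a matches the folded claimed values,
	// so one quotient commitment proves all the individual openings.
	fmt.Println(eval(folded, a) == foldedClaim) // true
}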
-func BatchOpenSinglePoint(polynomials [][]fr.Element, digests []Digest, point fr.Element, hf hash.Hash, srs *SRS) (BatchOpeningProof, error) { - - // check for invalid sizes - nbDigests := len(digests) - if nbDigests != len(polynomials) { - return BatchOpeningProof{}, ErrInvalidNbDigests - } - - // TODO ensure the polynomials are of the same size - largestPoly := -1 - for _, p := range polynomials { - if len(p) == 0 || len(p) > len(srs.G1) { - return BatchOpeningProof{}, ErrInvalidPolynomialSize - } - if len(p) > largestPoly { - largestPoly = len(p) - } - } - - var res BatchOpeningProof - - // compute the purported values - res.ClaimedValues = make([]fr.Element, len(polynomials)) - var wg sync.WaitGroup - wg.Add(len(polynomials)) - for i := 0; i < len(polynomials); i++ { - go func(_i int) { - res.ClaimedValues[_i] = eval(polynomials[_i], point) - wg.Done() - }(i) - } - - // derive the challenge γ, binded to the point and the commitments - gamma, err := deriveGamma(point, digests, hf) - if err != nil { - return BatchOpeningProof{}, err - } - - // ∑ᵢγⁱf(a) - var foldedEvaluations fr.Element - chSumGammai := make(chan struct{}, 1) - go func() { - // wait for polynomial evaluations to be completed (res.ClaimedValues) - wg.Wait() - foldedEvaluations = res.ClaimedValues[nbDigests-1] - for i := nbDigests - 2; i >= 0; i-- { - foldedEvaluations.Mul(&foldedEvaluations, &gamma). - Add(&foldedEvaluations, &res.ClaimedValues[i]) - } - close(chSumGammai) - }() - - // compute ∑ᵢγⁱfᵢ - // note: if we are willing to paralellize that, we could clone the poly and scale them by - // gamma n in parallel, before reducing into foldedPolynomials - foldedPolynomials := make([]fr.Element, largestPoly) - copy(foldedPolynomials, polynomials[0]) - acc := gamma - var pj fr.Element - for i := 1; i < len(polynomials); i++ { - for j := 0; j < len(polynomials[i]); j++ { - pj.Mul(&polynomials[i][j], &acc) - foldedPolynomials[j].Add(&foldedPolynomials[j], &pj) - } - acc.Mul(&acc, &gamma) - } - - // compute H - <-chSumGammai - h := dividePolyByXminusA(foldedPolynomials, foldedEvaluations, point) - foldedPolynomials = nil // same memory as h - - res.H, err = Commit(h, srs) - if err != nil { - return BatchOpeningProof{}, err - } - - return res, nil -} - -// FoldProof fold the digests and the proofs in batchOpeningProof using Fiat Shamir -// to obtain an opening proof at a single point. 
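The challenge γ used above, and again by FoldProof here, is derived rather than chosen: deriveGamma binds it to the opening point and to every digest through a Fiat-Shamir transcript, so the prover cannot pick the polynomials after seeing γ. A standalone sketch of that binding with sha256 standing in for the transcript plumbing; the toy modulus and names are illustrative.

package main

import (
	"crypto/sha256"
	"fmt"
	"math/big"
)

// deriveGammaSketch hashes the point and all digests, then reduces the
// result into the scalar field, so the challenge depends on both.
func deriveGammaSketch(point []byte, digests [][]byte, modulus *big.Int) *big.Int {
	h := sha256.New()
	h.Write([]byte("gamma")) // domain separation, mirroring the transcript label
	h.Write(point)
	for _, d := range digests {
		h.Write(d)
	}
	gamma := new(big.Int).SetBytes(h.Sum(nil))
	return gamma.Mod(gamma, modulus)
}

func main() {
	r := big.NewInt(2147483647) // toy modulus, illustration only
	point := []byte{0x01, 0x02}
	digests := [][]byte{{0xaa}, {0xbb}}

	g1 := deriveGammaSketch(point, digests, r)
	digests[1] = []byte{0xbc} // tamper with one commitment
	g2 := deriveGammaSketch(point, digests, r)

	// Any change to a digest changes the challenge (with overwhelming probability).
	fmt.Println(g1.Cmp(g2) != 0) // true
}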
-// -// * digests list of digests on which batchOpeningProof is based -// * batchOpeningProof opening proof of digests -// * returns the folded version of batchOpeningProof, Digest, the folded version of digests -func FoldProof(digests []Digest, batchOpeningProof *BatchOpeningProof, point fr.Element, hf hash.Hash) (OpeningProof, Digest, error) { - - nbDigests := len(digests) - - // check consistancy between numbers of claims vs number of digests - if nbDigests != len(batchOpeningProof.ClaimedValues) { - return OpeningProof{}, Digest{}, ErrInvalidNbDigests - } - - // derive the challenge γ, binded to the point and the commitments - gamma, err := deriveGamma(point, digests, hf) - if err != nil { - return OpeningProof{}, Digest{}, ErrInvalidNbDigests - } - - // fold the claimed values and digests - // gammai = [1,γ,γ²,..,γⁿ⁻¹] - gammai := make([]fr.Element, nbDigests) - gammai[0].SetOne() - for i := 1; i < nbDigests; i++ { - gammai[i].Mul(&gammai[i-1], &gamma) - } - - foldedDigests, foldedEvaluations, err := fold(digests, batchOpeningProof.ClaimedValues, gammai) - if err != nil { - return OpeningProof{}, Digest{}, err - } - - // create the folded opening proof - var res OpeningProof - res.ClaimedValue.Set(&foldedEvaluations) - res.H.Set(&batchOpeningProof.H) - - return res, foldedDigests, nil -} - -// BatchVerifySinglePoint verifies a batched opening proof at a single point of a list of polynomials. -// -// * digests list of digests on which opening proof is done -// * batchOpeningProof proof of correct opening on the digests -func BatchVerifySinglePoint(digests []Digest, batchOpeningProof *BatchOpeningProof, point fr.Element, hf hash.Hash, srs *SRS) error { - - // fold the proof - foldedProof, foldedDigest, err := FoldProof(digests, batchOpeningProof, point, hf) - if err != nil { - return err - } - - // verify the foldedProof againts the foldedDigest - err = Verify(&foldedDigest, &foldedProof, point, srs) - return err - -} - -// BatchVerifyMultiPoints batch verifies a list of opening proofs at different points. -// The purpose of the batching is to have only one pairing for verifying several proofs. 
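The single pairing is possible because the check is a random linear combination: if every proof satisfies fᵢ(α) − fᵢ(aᵢ) = (α − aᵢ)·Hᵢ(α), then the λ-weighted sum of those relations holds as well, while a bad proof survives the aggregation only with negligible probability over the random λᵢ. A standalone toy check of that algebra over a small prime, with the secret α in the clear (illustrative only, not a security argument; math/rand is not a cryptographic source):

package main

import (
	"fmt"
	"math/rand"
)

const q = 2147483647 // toy prime, illustration only

func eval(p []uint64, x uint64) uint64 {
	var res uint64
	for i := len(p) - 1; i >= 0; i-- {
		res = (res*x + p[i]) % q
	}
	return res
}

// quotient returns H with f(X) - f(a) = (X - a)*H(X), by synthetic division,
// the same recurrence dividePolyByXminusA uses.
func quotient(f []uint64, a uint64) []uint64 {
	h := make([]uint64, len(f)-1)
	var carry uint64
	for i := len(f) - 1; i >= 1; i-- {
		carry = (carry*a + f[i]) % q
		h[i-1] = carry
	}
	return h
}

func main() {
	const alpha = 55555333 // the secret point, in the clear for the toy
	polys := [][]uint64{{3, 1, 4, 1}, {2, 7, 1, 8}}
	points := []uint64{10007, 65537}

	// Aggregate sum_i lambda_i*(f_i(alpha) - f_i(a_i) - (alpha - a_i)*H_i(alpha)).
	var agg uint64
	for i, f := range polys {
		lambda := uint64(rand.Intn(q-1) + 1)
		h := quotient(f, points[i])
		lhs := (eval(f, alpha) + q - eval(f, points[i])) % q
		rhs := (alpha + q - points[i]) % q * eval(h, alpha) % q
		agg = (agg + lambda*((lhs+q-rhs)%q)%q) % q
	}

	// Honest proofs make every term zero, so one aggregated check suffices;
	// a nonzero term would survive random lambdas only with probability ~1/q.
	fmt.Println(agg == 0) // true
}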
-// -// * digests list of committed polynomials -// * proofs list of opening proofs, one for each digest -// * points the list of points at which the opening are done -func BatchVerifyMultiPoints(digests []Digest, proofs []OpeningProof, points []fr.Element, srs *SRS) error { - - // check consistancy nb proogs vs nb digests - if len(digests) != len(proofs) || len(digests) != len(points) { - return ErrInvalidNbDigests - } - - // if only one digest, call Verify - if len(digests) == 1 { - return Verify(&digests[0], &proofs[0], points[0], srs) - } - - // sample random numbers λᵢ for sampling - randomNumbers := make([]fr.Element, len(digests)) - randomNumbers[0].SetOne() - for i := 1; i < len(randomNumbers); i++ { - _, err := randomNumbers[i].SetRandom() - if err != nil { - return err - } - } - - // fold the committed quotients compute ∑ᵢλᵢ[Hᵢ(α)]G₁ - var foldedQuotients bls12381.G1Affine - quotients := make([]bls12381.G1Affine, len(proofs)) - for i := 0; i < len(randomNumbers); i++ { - quotients[i].Set(&proofs[i].H) - } - config := ecc.MultiExpConfig{} - _, err := foldedQuotients.MultiExp(quotients, randomNumbers, config) - if err != nil { - return nil - } - - // fold digests and evals - evals := make([]fr.Element, len(digests)) - for i := 0; i < len(randomNumbers); i++ { - evals[i].Set(&proofs[i].ClaimedValue) - } - - // fold the digests: ∑ᵢλᵢ[f_i(α)]G₁ - // fold the evals : ∑ᵢλᵢfᵢ(aᵢ) - foldedDigests, foldedEvals, err := fold(digests, evals, randomNumbers) - if err != nil { - return err - } - - // compute commitment to folded Eval [∑ᵢλᵢfᵢ(aᵢ)]G₁ - var foldedEvalsCommit bls12381.G1Affine - var foldedEvalsBigInt big.Int - foldedEvals.ToBigIntRegular(&foldedEvalsBigInt) - foldedEvalsCommit.ScalarMultiplication(&srs.G1[0], &foldedEvalsBigInt) - - // compute foldedDigests = ∑ᵢλᵢ[fᵢ(α)]G₁ - [∑ᵢλᵢfᵢ(aᵢ)]G₁ - foldedDigests.Sub(&foldedDigests, &foldedEvalsCommit) - - // combien the points and the quotients using γᵢ - // ∑ᵢλᵢ[p_i]([Hᵢ(α)]G₁) - var foldedPointsQuotients bls12381.G1Affine - for i := 0; i < len(randomNumbers); i++ { - randomNumbers[i].Mul(&randomNumbers[i], &points[i]) - } - _, err = foldedPointsQuotients.MultiExp(quotients, randomNumbers, config) - if err != nil { - return err - } - - // ∑ᵢλᵢ[f_i(α)]G₁ - [∑ᵢλᵢfᵢ(aᵢ)]G₁ + ∑ᵢλᵢ[p_i]([Hᵢ(α)]G₁) - // = [∑ᵢλᵢf_i(α) - ∑ᵢλᵢfᵢ(aᵢ) + ∑ᵢλᵢpᵢHᵢ(α)]G₁ - foldedDigests.Add(&foldedDigests, &foldedPointsQuotients) - - // -∑ᵢλᵢ[Qᵢ(α)]G₁ - foldedQuotients.Neg(&foldedQuotients) - - // pairing check - // e([∑ᵢλᵢ(fᵢ(α) - fᵢ(pᵢ) + pᵢHᵢ(α))]G₁, G₂).e([-∑ᵢλᵢ[Hᵢ(α)]G₁), [α]G₂) - check, err := bls12381.PairingCheck( - []bls12381.G1Affine{foldedDigests, foldedQuotients}, - []bls12381.G2Affine{srs.G2[0], srs.G2[1]}, - ) - if err != nil { - return err - } - if !check { - return ErrVerifyOpeningProof - } - return nil - -} - -// fold folds digests and evaluations using the list of factors as random numbers. 
-// -// * digests list of digests to fold -// * evaluations list of evaluations to fold -// * factors list of multiplicative factors used for the folding (in Montgomery form) -// -// * Returns ∑ᵢcᵢdᵢ, ∑ᵢcᵢf(aᵢ) -func fold(di []Digest, fai []fr.Element, ci []fr.Element) (Digest, fr.Element, error) { - - // length inconsistancy between digests and evaluations should have been done before calling this function - nbDigests := len(di) - - // fold the claimed values ∑ᵢcᵢf(aᵢ) - var foldedEvaluations, tmp fr.Element - for i := 0; i < nbDigests; i++ { - tmp.Mul(&fai[i], &ci[i]) - foldedEvaluations.Add(&foldedEvaluations, &tmp) - } - - // fold the digests ∑ᵢ[cᵢ]([fᵢ(α)]G₁) - var foldedDigests Digest - _, err := foldedDigests.MultiExp(di, ci, ecc.MultiExpConfig{}) - if err != nil { - return foldedDigests, foldedEvaluations, err - } - - // folding done - return foldedDigests, foldedEvaluations, nil - -} - -// deriveGamma derives a challenge using Fiat Shamir to fold proofs. -func deriveGamma(point fr.Element, digests []Digest, hf hash.Hash) (fr.Element, error) { - - // derive the challenge gamma, binded to the point and the commitments - fs := fiatshamir.NewTranscript(hf, "gamma") - if err := fs.Bind("gamma", point.Marshal()); err != nil { - return fr.Element{}, err - } - for i := 0; i < len(digests); i++ { - if err := fs.Bind("gamma", digests[i].Marshal()); err != nil { - return fr.Element{}, err - } - } - gammaByte, err := fs.ComputeChallenge("gamma") - if err != nil { - return fr.Element{}, err - } - var gamma fr.Element - gamma.SetBytes(gammaByte) - - return gamma, nil -} - -// dividePolyByXminusA computes (f-f(a))/(x-a), in canonical basis, in regular form -// f memory is re-used for the result -func dividePolyByXminusA(f []fr.Element, fa, a fr.Element) []fr.Element { - - // first we compute f-f(a) - f[0].Sub(&f[0], &fa) - - // now we use syntetic division to divide by x-a - var t fr.Element - for i := len(f) - 2; i >= 0; i-- { - t.Mul(&f[i+1], &a) - - f[i].Add(&f[i], &t) - } - - // the result is of degree deg(f)-1 - return f[1:] -} diff --git a/tools/gnark/bls12_381/fr/kzg/kzg_test.go b/tools/gnark/bls12_381/fr/kzg/kzg_test.go deleted file mode 100644 index 2332edb4..00000000 --- a/tools/gnark/bls12_381/fr/kzg/kzg_test.go +++ /dev/null @@ -1,481 +0,0 @@ -// Copyright 2020 ConsenSys Software Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -// Code generated by consensys/gnark-crypto DO NOT EDIT - -package kzg - -import ( - "bytes" - "crypto/sha256" - "math/big" - "reflect" - "testing" - - "github.com/consensys/gnark-crypto/ecc" - "github.com/consensys/gnark-crypto/ecc/bls12-381" - "github.com/consensys/gnark-crypto/ecc/bls12-381/fr" -) - -// testSRS re-used accross tests of the KZG scheme -var testSRS *SRS - -func init() { - const srsSize = 230 - testSRS, _ = NewSRS(ecc.NextPowerOfTwo(srsSize), new(big.Int).SetInt64(42)) -} - -func TestDividePolyByXminusA(t *testing.T) { - - const pSize = 230 - - // build random polynomial - pol := make([]fr.Element, pSize) - pol[0].SetRandom() - for i := 1; i < pSize; i++ { - pol[i] = pol[i-1] - } - - // evaluate the polynomial at a random point - var point fr.Element - point.SetRandom() - evaluation := eval(pol, point) - - // probabilistic test (using Schwartz Zippel lemma, evaluation at one point is enough) - var randPoint, xminusa fr.Element - randPoint.SetRandom() - polRandpoint := eval(pol, randPoint) - polRandpoint.Sub(&polRandpoint, &evaluation) // f(rand)-f(point) - - // compute f-f(a)/x-a - h := dividePolyByXminusA(pol, evaluation, point) - pol = nil // h reuses this memory - - if len(h) != 229 { - t.Fatal("inconsistant size of quotient") - } - - hRandPoint := eval(h, randPoint) - xminusa.Sub(&randPoint, &point) // rand-point - - // f(rand)-f(point) ==? h(rand)*(rand-point) - hRandPoint.Mul(&hRandPoint, &xminusa) - - if !hRandPoint.Equal(&polRandpoint) { - t.Fatal("Error f-f(a)/x-a") - } -} - -func TestSerializationSRS(t *testing.T) { - - // create a SRS - srs, err := NewSRS(64, new(big.Int).SetInt64(42)) - if err != nil { - t.Fatal(err) - } - - // serialize it... - var buf bytes.Buffer - _, err = srs.WriteTo(&buf) - if err != nil { - t.Fatal(err) - } - - // reconstruct the SRS - var _srs SRS - _, err = _srs.ReadFrom(&buf) - if err != nil { - t.Fatal(err) - } - - // compare - if !reflect.DeepEqual(srs, &_srs) { - t.Fatal("scheme serialization failed") - } - -} - -func TestCommit(t *testing.T) { - - // create a polynomial - f := make([]fr.Element, 60) - for i := 0; i < 60; i++ { - f[i].SetRandom() - } - - // commit using the method from KZG - _kzgCommit, err := Commit(f, testSRS) - if err != nil { - t.Fatal(err) - } - var kzgCommit bls12381.G1Affine - kzgCommit.Unmarshal(_kzgCommit.Marshal()) - - // check commitment using manual commit - var x fr.Element - x.SetString("42") - fx := eval(f, x) - var fxbi big.Int - fx.ToBigIntRegular(&fxbi) - var manualCommit bls12381.G1Affine - manualCommit.Set(&testSRS.G1[0]) - manualCommit.ScalarMultiplication(&manualCommit, &fxbi) - - // compare both results - if !kzgCommit.Equal(&manualCommit) { - t.Fatal("error KZG commitment") - } - -} - -func TestVerifySinglePoint(t *testing.T) { - - // create a polynomial - f := randomPolynomial(60) - - // commit the polynomial - digest, err := Commit(f, testSRS) - if err != nil { - t.Fatal(err) - } - - // compute opening proof at a random point - var point fr.Element - point.SetString("4321") - proof, err := Open(f, point, testSRS) - if err != nil { - t.Fatal(err) - } - - // verify the claimed valued - expected := eval(f, point) - if !proof.ClaimedValue.Equal(&expected) { - t.Fatal("inconsistant claimed value") - } - - // verify correct proof - err = Verify(&digest, &proof, point, testSRS) - if err != nil { - t.Fatal(err) - } - - { - // verify wrong proof - proof.ClaimedValue.Double(&proof.ClaimedValue) - err = Verify(&digest, &proof, point, testSRS) - if err == nil { - t.Fatal("verifying wrong proof 
should have failed") - } - } - { - // verify wrong proof with quotient set to zero - // see https://cryptosubtlety.medium.com/00-8d4adcf4d255 - proof.H.X.SetZero() - proof.H.Y.SetZero() - err = Verify(&digest, &proof, point, testSRS) - if err == nil { - t.Fatal("verifying wrong proof should have failed") - } - } -} - -func TestBatchVerifySinglePoint(t *testing.T) { - - size := 40 - - // create polynomials - f := make([][]fr.Element, 10) - for i := 0; i < 10; i++ { - f[i] = randomPolynomial(size) - } - - // commit the polynomials - digests := make([]Digest, 10) - for i := 0; i < 10; i++ { - digests[i], _ = Commit(f[i], testSRS) - - } - - // pick a hash function - hf := sha256.New() - - // compute opening proof at a random point - var point fr.Element - point.SetString("4321") - proof, err := BatchOpenSinglePoint(f, digests, point, hf, testSRS) - if err != nil { - t.Fatal(err) - } - - // verify the claimed values - for i := 0; i < 10; i++ { - expectedClaim := eval(f[i], point) - if !expectedClaim.Equal(&proof.ClaimedValues[i]) { - t.Fatal("inconsistant claimed values") - } - } - - // verify correct proof - err = BatchVerifySinglePoint(digests, &proof, point, hf, testSRS) - if err != nil { - t.Fatal(err) - } - - { - // verify wrong proof - proof.ClaimedValues[0].Double(&proof.ClaimedValues[0]) - err = BatchVerifySinglePoint(digests, &proof, point, hf, testSRS) - if err == nil { - t.Fatal("verifying wrong proof should have failed") - } - } - { - // verify wrong proof with quotient set to zero - // see https://cryptosubtlety.medium.com/00-8d4adcf4d255 - proof.H.X.SetZero() - proof.H.Y.SetZero() - err = BatchVerifySinglePoint(digests, &proof, point, hf, testSRS) - if err == nil { - t.Fatal("verifying wrong proof should have failed") - } - } - -} - -func TestBatchVerifyMultiPoints(t *testing.T) { - - // create polynomials - f := make([][]fr.Element, 10) - for i := 0; i < 10; i++ { - f[i] = randomPolynomial(40) - } - - // commit the polynomials - digests := make([]Digest, 10) - for i := 0; i < 10; i++ { - digests[i], _ = Commit(f[i], testSRS) - } - - // pick a hash function - hf := sha256.New() - - // compute 2 batch opening proofs at 2 random points - points := make([]fr.Element, 2) - batchProofs := make([]BatchOpeningProof, 2) - points[0].SetRandom() - batchProofs[0], _ = BatchOpenSinglePoint(f[:5], digests[:5], points[0], hf, testSRS) - points[1].SetRandom() - batchProofs[1], _ = BatchOpenSinglePoint(f[5:], digests[5:], points[1], hf, testSRS) - - // fold the 2 batch opening proofs - proofs := make([]OpeningProof, 2) - foldedDigests := make([]Digest, 2) - proofs[0], foldedDigests[0], _ = FoldProof(digests[:5], &batchProofs[0], points[0], hf) - proofs[1], foldedDigests[1], _ = FoldProof(digests[5:], &batchProofs[1], points[1], hf) - - // check the the individual batch proofs are correct - err := Verify(&foldedDigests[0], &proofs[0], points[0], testSRS) - if err != nil { - t.Fatal(err) - } - err = Verify(&foldedDigests[1], &proofs[1], points[1], testSRS) - if err != nil { - t.Fatal(err) - } - - // batch verify correct folded proofs - err = BatchVerifyMultiPoints(foldedDigests, proofs, points, testSRS) - if err != nil { - t.Fatal(err) - } - - { - // batch verify tampered folded proofs - proofs[0].ClaimedValue.Double(&proofs[0].ClaimedValue) - - err = BatchVerifyMultiPoints(foldedDigests, proofs, points, testSRS) - if err == nil { - t.Fatal(err) - } - } - { - // batch verify tampered folded proofs with quotients set to infinity - // see https://cryptosubtlety.medium.com/00-8d4adcf4d255 - 
proofs[0].H.X.SetZero() - proofs[0].H.Y.SetZero() - proofs[1].H.X.SetZero() - proofs[1].H.Y.SetZero() - err = BatchVerifyMultiPoints(foldedDigests, proofs, points, testSRS) - if err == nil { - t.Fatal(err) - } - } - -} - -const benchSize = 1 << 16 - -func BenchmarkKZGCommit(b *testing.B) { - benchSRS, err := NewSRS(ecc.NextPowerOfTwo(benchSize), new(big.Int).SetInt64(42)) - if err != nil { - b.Fatal(err) - } - // random polynomial - p := randomPolynomial(benchSize / 2) - - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, _ = Commit(p, benchSRS) - } -} - -func BenchmarkDivideByXMinusA(b *testing.B) { - const pSize = 1 << 22 - - // build random polynomial - pol := make([]fr.Element, pSize) - pol[0].SetRandom() - for i := 1; i < pSize; i++ { - pol[i] = pol[i-1] - } - var a, fa fr.Element - a.SetRandom() - fa.SetRandom() - - b.ResetTimer() - for i := 0; i < b.N; i++ { - dividePolyByXminusA(pol, fa, a) - pol = pol[:pSize] - pol[pSize-1] = pol[0] - } -} - -func BenchmarkKZGOpen(b *testing.B) { - benchSRS, err := NewSRS(ecc.NextPowerOfTwo(benchSize), new(big.Int).SetInt64(42)) - if err != nil { - b.Fatal(err) - } - - // random polynomial - p := randomPolynomial(benchSize / 2) - var r fr.Element - r.SetRandom() - - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, _ = Open(p, r, benchSRS) - } -} - -func BenchmarkKZGVerify(b *testing.B) { - benchSRS, err := NewSRS(ecc.NextPowerOfTwo(benchSize), new(big.Int).SetInt64(42)) - if err != nil { - b.Fatal(err) - } - - // random polynomial - p := randomPolynomial(benchSize / 2) - var r fr.Element - r.SetRandom() - - // commit - comm, err := Commit(p, benchSRS) - if err != nil { - b.Fatal(err) - } - - // open - openingProof, err := Open(p, r, benchSRS) - if err != nil { - b.Fatal(err) - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - Verify(&comm, &openingProof, r, benchSRS) - } -} - -func BenchmarkKZGBatchOpen10(b *testing.B) { - benchSRS, err := NewSRS(ecc.NextPowerOfTwo(benchSize), new(big.Int).SetInt64(42)) - if err != nil { - b.Fatal(err) - } - - // 10 random polynomials - var ps [10][]fr.Element - for i := 0; i < 10; i++ { - ps[i] = randomPolynomial(benchSize / 2) - } - - // commitments - var commitments [10]Digest - for i := 0; i < 10; i++ { - commitments[i], _ = Commit(ps[i], benchSRS) - } - - // pick a hash function - hf := sha256.New() - - var r fr.Element - r.SetRandom() - - b.ResetTimer() - for i := 0; i < b.N; i++ { - BatchOpenSinglePoint(ps[:], commitments[:], r, hf, benchSRS) - } -} - -func BenchmarkKZGBatchVerify10(b *testing.B) { - benchSRS, err := NewSRS(ecc.NextPowerOfTwo(benchSize), new(big.Int).SetInt64(42)) - if err != nil { - b.Fatal(err) - } - - // 10 random polynomials - var ps [10][]fr.Element - for i := 0; i < 10; i++ { - ps[i] = randomPolynomial(benchSize / 2) - } - - // commitments - var commitments [10]Digest - for i := 0; i < 10; i++ { - commitments[i], _ = Commit(ps[i], benchSRS) - } - - // pick a hash function - hf := sha256.New() - - var r fr.Element - r.SetRandom() - - proof, err := BatchOpenSinglePoint(ps[:], commitments[:], r, hf, benchSRS) - if err != nil { - b.Fatal(err) - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - BatchVerifySinglePoint(commitments[:], &proof, r, hf, benchSRS) - } -} - -func randomPolynomial(size int) []fr.Element { - f := make([]fr.Element, size) - for i := 0; i < size; i++ { - f[i].SetRandom() - } - return f -} diff --git a/tools/gnark/bls12_381/fr/kzg/marshal.go b/tools/gnark/bls12_381/fr/kzg/marshal.go deleted file mode 100644 index 527d3ddf..00000000 --- 
a/tools/gnark/bls12_381/fr/kzg/marshal.go +++ /dev/null @@ -1,134 +0,0 @@ -// Copyright 2020 ConsenSys Software Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by consensys/gnark-crypto DO NOT EDIT - -package kzg - -import ( - "github.com/consensys/gnark-crypto/ecc/bls12-381" - "io" -) - -// WriteTo writes binary encoding of the SRS -func (srs *SRS) WriteTo(w io.Writer) (int64, error) { - // encode the SRS - enc := bls12381.NewEncoder(w) - - toEncode := []interface{}{ - &srs.G2[0], - &srs.G2[1], - srs.G1, - } - - for _, v := range toEncode { - if err := enc.Encode(v); err != nil { - return enc.BytesWritten(), err - } - } - - return enc.BytesWritten(), nil -} - -// ReadFrom decodes SRS data from reader. -func (srs *SRS) ReadFrom(r io.Reader) (int64, error) { - // decode the SRS - dec := bls12381.NewDecoder(r) - - toDecode := []interface{}{ - &srs.G2[0], - &srs.G2[1], - &srs.G1, - } - - for _, v := range toDecode { - if err := dec.Decode(v); err != nil { - return dec.BytesRead(), err - } - } - - return dec.BytesRead(), nil -} - -// WriteTo writes binary encoding of a OpeningProof -func (proof *OpeningProof) WriteTo(w io.Writer) (int64, error) { - enc := bls12381.NewEncoder(w) - - toEncode := []interface{}{ - &proof.H, - &proof.ClaimedValue, - } - - for _, v := range toEncode { - if err := enc.Encode(v); err != nil { - return enc.BytesWritten(), err - } - } - - return enc.BytesWritten(), nil -} - -// ReadFrom decodes OpeningProof data from reader. -func (proof *OpeningProof) ReadFrom(r io.Reader) (int64, error) { - dec := bls12381.NewDecoder(r) - - toDecode := []interface{}{ - &proof.H, - &proof.ClaimedValue, - } - - for _, v := range toDecode { - if err := dec.Decode(v); err != nil { - return dec.BytesRead(), err - } - } - - return dec.BytesRead(), nil -} - -// WriteTo writes binary encoding of a BatchOpeningProof -func (proof *BatchOpeningProof) WriteTo(w io.Writer) (int64, error) { - enc := bls12381.NewEncoder(w) - - toEncode := []interface{}{ - &proof.H, - proof.ClaimedValues, - } - - for _, v := range toEncode { - if err := enc.Encode(v); err != nil { - return enc.BytesWritten(), err - } - } - - return enc.BytesWritten(), nil -} - -// ReadFrom decodes BatchOpeningProof data from reader. -func (proof *BatchOpeningProof) ReadFrom(r io.Reader) (int64, error) { - dec := bls12381.NewDecoder(r) - - toDecode := []interface{}{ - &proof.H, - &proof.ClaimedValues, - } - - for _, v := range toDecode { - if err := dec.Decode(v); err != nil { - return dec.BytesRead(), err - } - } - - return dec.BytesRead(), nil -} diff --git a/tools/gnark/bls12_381/fr/mimc/decompose.go b/tools/gnark/bls12_381/fr/mimc/decompose.go deleted file mode 100644 index 925d6793..00000000 --- a/tools/gnark/bls12_381/fr/mimc/decompose.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2020 ConsenSys Software Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by consensys/gnark-crypto DO NOT EDIT - -package mimc - -import ( - "math/big" - - "github.com/consensys/gnark-crypto/ecc/bls12-381/fr" -) - -// Decompose interpret rawBytes as a bigInt x in big endian, -// and returns the digits of x (from LSB to MSB) when x is written -// in basis modulo. -func Decompose(rawBytes []byte) []fr.Element { - - rawBigInt := big.NewInt(0).SetBytes(rawBytes) - modulo := fr.Modulus() - - // maximum number of chunks that a function - maxNbChunks := len(rawBytes) / fr.Bytes - - res := make([]fr.Element, 0, maxNbChunks) - var tmp fr.Element - t := new(big.Int) - for rawBigInt.Sign() != 0 { - rawBigInt.DivMod(rawBigInt, modulo, t) - tmp.SetBigInt(t) - res = append(res, tmp) - } - - return res -} diff --git a/tools/gnark/bls12_381/fr/mimc/decompose_test.go b/tools/gnark/bls12_381/fr/mimc/decompose_test.go deleted file mode 100644 index 36809a2a..00000000 --- a/tools/gnark/bls12_381/fr/mimc/decompose_test.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2020 ConsenSys Software Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by consensys/gnark-crypto DO NOT EDIT - -package mimc - -import ( - "math/big" - "testing" - - "github.com/consensys/gnark-crypto/ecc/bls12-381/fr" -) - -func TestDecompose(t *testing.T) { - - // create 10 random digits in basis r - nbDigits := 10 - a := make([]fr.Element, nbDigits) - for i := 0; i < nbDigits; i++ { - a[i].SetRandom() - } - - // create a big int whose digits in basis r are a - m := fr.Modulus() - var b, tmp big.Int - for i := nbDigits - 1; i >= 0; i-- { - b.Mul(&b, m) - a[i].ToBigIntRegular(&tmp) - b.Add(&b, &tmp) - } - - // query the decomposition and compare to a - bb := b.Bytes() - d := Decompose(bb) - for i := 0; i < nbDigits; i++ { - if !d[i].Equal(&a[i]) { - t.Fatal("error decomposition") - } - } - -} diff --git a/tools/gnark/bls12_381/fr/mimc/doc.go b/tools/gnark/bls12_381/fr/mimc/doc.go deleted file mode 100644 index 497bd40a..00000000 --- a/tools/gnark/bls12_381/fr/mimc/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2020 ConsenSys Software Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
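The removed Decompose exists so that byte strings longer than one field element (for instance two SHA-256 digests) can still be fed to the MiMC hasher deleted further down in this diff: the input is read as one big-endian integer and re-expressed in base r, least significant digit first. A hedged round-trip sketch, with a placeholder import path for the vendored package:

package main

import (
	"crypto/sha256"
	"fmt"
	"math/big"

	"github.com/consensys/gnark-crypto/ecc/bls12-381/fr"

	mimc "example.com/tools/gnark/bls12_381/fr/mimc" // placeholder import path for the vendored package
)

func sha256Sum(s string) []byte {
	h := sha256.Sum256([]byte(s))
	return h[:]
}

func main() {
	// 64 bytes of arbitrary data: too large for a single canonical fr.Element.
	data := append(sha256Sum("hello"), sha256Sum("world")...)

	// Digits of the big-endian integer in base fr.Modulus(), least significant first.
	digits := mimc.Decompose(data)

	// Recombine the digits (x = sum of d_i * m^i) to check the round trip.
	m := fr.Modulus()
	acc := new(big.Int)
	var tmp big.Int
	for i := len(digits) - 1; i >= 0; i-- {
		acc.Mul(acc, m)
		digits[i].ToBigIntRegular(&tmp)
		acc.Add(acc, &tmp)
	}
	fmt.Println(acc.Cmp(new(big.Int).SetBytes(data)) == 0) // expected: true
}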
-// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by consensys/gnark-crypto DO NOT EDIT - -// Package mimc provides MiMC hash function using Miyaguchi–Preneel construction. -package mimc diff --git a/tools/gnark/bls12_381/fr/mimc/mimc.go b/tools/gnark/bls12_381/fr/mimc/mimc.go deleted file mode 100644 index e704e01f..00000000 --- a/tools/gnark/bls12_381/fr/mimc/mimc.go +++ /dev/null @@ -1,184 +0,0 @@ -// Copyright 2020 ConsenSys Software Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by consensys/gnark-crypto DO NOT EDIT - -package mimc - -import ( - "errors" - "hash" - - "github.com/consensys/gnark-crypto/ecc/bls12-381/fr" - "golang.org/x/crypto/sha3" - "math/big" - "sync" -) - -const ( - mimcNbRounds = 91 - seed = "seed" // seed to derive the constants - BlockSize = fr.Bytes // BlockSize size that mimc consumes -) - -// Params constants for the mimc hash function -var ( - mimcConstants [mimcNbRounds]fr.Element - once sync.Once -) - -// digest represents the partial evaluation of the checksum -// along with the params of the mimc function -type digest struct { - h fr.Element - data []byte // data to hash -} - -// GetConstants exposed to be used in gnark -func GetConstants() []big.Int { - once.Do(initConstants) // init constants - res := make([]big.Int, mimcNbRounds) - for i := 0; i < mimcNbRounds; i++ { - mimcConstants[i].ToBigIntRegular(&res[i]) - } - return res -} - -// NewMiMC returns a MiMCImpl object, pure-go reference implementation -func NewMiMC() hash.Hash { - d := new(digest) - d.Reset() - return d -} - -// Reset resets the Hash to its initial state. -func (d *digest) Reset() { - d.data = nil - d.h = fr.Element{0, 0, 0, 0} -} - -// Sum appends the current hash to b and returns the resulting slice. -// It does not change the underlying hash state. -func (d *digest) Sum(b []byte) []byte { - buffer := d.checksum() - d.data = nil // flush the data already hashed - hash := buffer.Bytes() - b = append(b, hash[:]...) - return b -} - -// BlockSize returns the hash's underlying block size. -// The Write method must be able to accept any amount -// of data, but it may operate more efficiently if all writes -// are a multiple of the block size. -func (d *digest) Size() int { - return BlockSize -} - -// BlockSize returns the number of bytes Sum will return. -func (d *digest) BlockSize() int { - return BlockSize -} - -// Write (via the embedded io.Writer interface) adds more data to the running hash. -// -// Each []byte block of size BlockSize represents a big endian fr.Element. -// -// If len(p) is not a multiple of BlockSize and any of the []byte in p represent an integer -// larger than fr.Modulus, this function returns an error. -// -// To hash arbitrary data ([]byte not representing canonical field elements) use Decompose -// function in this package. 
-func (d *digest) Write(p []byte) (n int, err error) { - n = len(p) - if n%BlockSize != 0 { - return 0, errors.New("invalid input length: must represent a list of field elements, expects a []byte of len m*BlockSize") - } - - // ensure each block represents a field element in canonical reduced form - for i := 0; i < n; i += BlockSize { - if _, err = fr.BigEndian.Element((*[BlockSize]byte)(p[i : i+BlockSize])); err != nil { - return 0, err - } - } - - d.data = append(d.data, p...) - return -} - -// Hash hash using Miyaguchi-Preneel: -// https://en.wikipedia.org/wiki/One-way_compression_function -// The XOR operation is replaced by field addition, data is in Montgomery form -func (d *digest) checksum() fr.Element { - // Write guarantees len(data) % BlockSize == 0 - - // TODO @ThomasPiellard shouldn't Sum() returns an error if there is no data? - if len(d.data) == 0 { - d.data = make([]byte, BlockSize) - } - - for i := 0; i < len(d.data); i += BlockSize { - x, _ := fr.BigEndian.Element((*[BlockSize]byte)(d.data[i : i+BlockSize])) - r := d.encrypt(x) - d.h.Add(&r, &d.h).Add(&d.h, &x) - } - - return d.h -} - -// plain execution of a mimc run -// m: message -// k: encryption key -func (d *digest) encrypt(m fr.Element) fr.Element { - once.Do(initConstants) // init constants - - for i := 0; i < mimcNbRounds; i++ { - // m = (m+k+c)^5 - var tmp fr.Element - tmp.Add(&m, &d.h).Add(&tmp, &mimcConstants[i]) - m.Square(&tmp). - Square(&m). - Mul(&m, &tmp) - } - m.Add(&m, &d.h) - return m -} - -// Sum computes the mimc hash of msg from seed -func Sum(msg []byte) ([]byte, error) { - var d digest - if _, err := d.Write(msg); err != nil { - return nil, err - } - h := d.checksum() - bytes := h.Bytes() - return bytes[:], nil -} - -func initConstants() { - bseed := ([]byte)(seed) - - hash := sha3.NewLegacyKeccak256() - _, _ = hash.Write(bseed) - rnd := hash.Sum(nil) // pre hash before use - hash.Reset() - _, _ = hash.Write(rnd) - - for i := 0; i < mimcNbRounds; i++ { - rnd = hash.Sum(nil) - mimcConstants[i].SetBytes(rnd) - hash.Reset() - _, _ = hash.Write(rnd) - } -} diff --git a/tools/gnark/bls12_381/fr/pedersen/pedersen.go b/tools/gnark/bls12_381/fr/pedersen/pedersen.go deleted file mode 100644 index 38cc4d32..00000000 --- a/tools/gnark/bls12_381/fr/pedersen/pedersen.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright 2020 ConsenSys Software Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by consensys/gnark-crypto DO NOT EDIT - -package pedersen - -import ( - "crypto/rand" - "fmt" - "github.com/consensys/gnark-crypto/ecc" - "github.com/consensys/gnark-crypto/ecc/bls12-381" - "github.com/consensys/gnark-crypto/ecc/bls12-381/fr" - "math/big" -) - -// Key for proof and verification -type Key struct { - g bls12381.G2Affine // TODO @tabaie: does this really have to be randomized? - gRootSigmaNeg bls12381.G2Affine //gRootSigmaNeg = g^{-1/σ} - basis []bls12381.G1Affine - basisExpSigma []bls12381.G1Affine -} - -func randomOnG2() (bls12381.G2Affine, error) { // TODO: Add to G2.go? 
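The Write contract of the removed MiMC hasher is easy to trip over: input length must be a multiple of BlockSize, and every block must be the canonical big-endian encoding of a field element. A minimal usage sketch under those constraints, with the same placeholder import path as above:

package main

import (
	"bytes"
	"fmt"

	"github.com/consensys/gnark-crypto/ecc/bls12-381/fr"

	mimc "example.com/tools/gnark/bls12_381/fr/mimc" // placeholder import path for the vendored package
)

func main() {
	// Serialize two field elements; raw arbitrary bytes would be rejected by Write.
	var a, b fr.Element
	a.SetUint64(1)
	b.SetUint64(2)
	ab, bb := a.Bytes(), b.Bytes() // canonical big-endian, fr.Bytes long each

	h := mimc.NewMiMC()
	if _, err := h.Write(ab[:]); err != nil {
		panic(err)
	}
	if _, err := h.Write(bb[:]); err != nil {
		panic(err)
	}
	digest := h.Sum(nil)

	// The one-shot Sum helper on the concatenated blocks gives the same result.
	digest2, err := mimc.Sum(append(ab[:], bb[:]...))
	if err != nil {
		panic(err)
	}
	fmt.Println(bytes.Equal(digest, digest2)) // expected: true
}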
- gBytes := make([]byte, fr.Bytes) - if _, err := rand.Read(gBytes); err != nil { - return bls12381.G2Affine{}, err - } - return bls12381.HashToG2(gBytes, []byte("random on g2")) -} - -func Setup(basis []bls12381.G1Affine) (Key, error) { - var ( - k Key - err error - ) - - if k.g, err = randomOnG2(); err != nil { - return k, err - } - - var modMinusOne big.Int - modMinusOne.Sub(fr.Modulus(), big.NewInt(1)) - var sigma *big.Int - if sigma, err = rand.Int(rand.Reader, &modMinusOne); err != nil { - return k, err - } - sigma.Add(sigma, big.NewInt(1)) - - var sigmaInvNeg big.Int - sigmaInvNeg.ModInverse(sigma, fr.Modulus()) - sigmaInvNeg.Sub(fr.Modulus(), &sigmaInvNeg) - k.gRootSigmaNeg.ScalarMultiplication(&k.g, &sigmaInvNeg) - - k.basisExpSigma = make([]bls12381.G1Affine, len(basis)) - for i := range basis { - k.basisExpSigma[i].ScalarMultiplication(&basis[i], sigma) - } - - k.basis = basis - return k, err -} - -func (k *Key) Commit(values []fr.Element) (commitment bls12381.G1Affine, knowledgeProof bls12381.G1Affine, err error) { - - if len(values) != len(k.basis) { - err = fmt.Errorf("unexpected number of values") - return - } - - // TODO @gbotrel this will spawn more than one task, see - // https://github.com/ConsenSys/gnark-crypto/issues/269 - config := ecc.MultiExpConfig{ - NbTasks: 1, // TODO Experiment - } - - if _, err = commitment.MultiExp(k.basis, values, config); err != nil { - return - } - - _, err = knowledgeProof.MultiExp(k.basisExpSigma, values, config) - - return -} - -// VerifyKnowledgeProof checks if the proof of knowledge is valid -func (k *Key) VerifyKnowledgeProof(commitment bls12381.G1Affine, knowledgeProof bls12381.G1Affine) error { - - if !commitment.IsInSubGroup() || !knowledgeProof.IsInSubGroup() { - return fmt.Errorf("subgroup check failed") - } - - product, err := bls12381.Pair([]bls12381.G1Affine{commitment, knowledgeProof}, []bls12381.G2Affine{k.g, k.gRootSigmaNeg}) - if err != nil { - return err - } - if product.IsOne() { - return nil - } - return fmt.Errorf("proof rejected") -} diff --git a/tools/gnark/bls12_381/fr/pedersen/pedersen_test.go b/tools/gnark/bls12_381/fr/pedersen/pedersen_test.go deleted file mode 100644 index 111f6359..00000000 --- a/tools/gnark/bls12_381/fr/pedersen/pedersen_test.go +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright 2020 ConsenSys Software Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -// Code generated by consensys/gnark-crypto DO NOT EDIT - -package pedersen - -import ( - "github.com/consensys/gnark-crypto/ecc/bls12-381" - "github.com/consensys/gnark-crypto/ecc/bls12-381/fr" - "github.com/stretchr/testify/assert" - "math/rand" - "testing" -) - -func interfaceSliceToFrSlice(t *testing.T, values ...interface{}) []fr.Element { - res := make([]fr.Element, len(values)) - for i, v := range values { - _, err := res[i].SetInterface(v) - assert.NoError(t, err) - } - return res -} - -func randomFrSlice(t *testing.T, size int) []interface{} { - res := make([]interface{}, size) - var err error - for i := range res { - var v fr.Element - res[i], err = v.SetRandom() - assert.NoError(t, err) - } - return res -} - -func randomOnG1() (bls12381.G1Affine, error) { // TODO: Add to G1.go? - gBytes := make([]byte, fr.Bytes) - if _, err := rand.Read(gBytes); err != nil { - return bls12381.G1Affine{}, err - } - return bls12381.HashToG1(gBytes, []byte("random on g2")) -} - -func testCommit(t *testing.T, values ...interface{}) { - - basis := make([]bls12381.G1Affine, len(values)) - for i := range basis { - var err error - basis[i], err = randomOnG1() - assert.NoError(t, err) - } - - var ( - key Key - err error - commitment, pok bls12381.G1Affine - ) - - key, err = Setup(basis) - assert.NoError(t, err) - commitment, pok, err = key.Commit(interfaceSliceToFrSlice(t, values...)) - assert.NoError(t, err) - assert.NoError(t, key.VerifyKnowledgeProof(commitment, pok)) - - pok.Neg(&pok) - assert.NotNil(t, key.VerifyKnowledgeProof(commitment, pok)) -} - -func TestCommitToOne(t *testing.T) { - testCommit(t, 1) -} - -func TestCommitSingle(t *testing.T) { - testCommit(t, randomFrSlice(t, 1)...) -} - -func TestCommitFiveElements(t *testing.T) { - testCommit(t, randomFrSlice(t, 5)...) -} diff --git a/tools/gnark/bls12_381/fr/permutation/doc.go b/tools/gnark/bls12_381/fr/permutation/doc.go deleted file mode 100644 index bdf98e6c..00000000 --- a/tools/gnark/bls12_381/fr/permutation/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2020 ConsenSys Software Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by consensys/gnark-crypto DO NOT EDIT - -// Package permutation provides an API to build permutation proofs. -package permutation diff --git a/tools/gnark/bls12_381/fr/permutation/permutation.go b/tools/gnark/bls12_381/fr/permutation/permutation.go deleted file mode 100644 index e2dab399..00000000 --- a/tools/gnark/bls12_381/fr/permutation/permutation.go +++ /dev/null @@ -1,379 +0,0 @@ -// Copyright 2020 ConsenSys Software Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
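The removed Pedersen test above drives Setup/Commit/VerifyKnowledgeProof through testify helpers; a compact standalone sketch of the same flow follows, building the random G1 basis with HashToG1 as the test does. The vendored import path is again a placeholder, and the domain-separation string is illustrative only.

package main

import (
	"crypto/rand"
	"fmt"

	bls12381 "github.com/consensys/gnark-crypto/ecc/bls12-381"
	"github.com/consensys/gnark-crypto/ecc/bls12-381/fr"

	pedersen "example.com/tools/gnark/bls12_381/fr/pedersen" // placeholder import path for the vendored package
)

func main() {
	// A small basis of random G1 points.
	basis := make([]bls12381.G1Affine, 3)
	for i := range basis {
		seed := make([]byte, fr.Bytes)
		if _, err := rand.Read(seed); err != nil {
			panic(err)
		}
		p, err := bls12381.HashToG1(seed, []byte("pedersen basis"))
		if err != nil {
			panic(err)
		}
		basis[i] = p
	}

	// Key generation for this basis.
	key, err := pedersen.Setup(basis)
	if err != nil {
		panic(err)
	}

	// Commit to one value per basis point; the second output is the knowledge proof.
	values := make([]fr.Element, len(basis))
	for i := range values {
		values[i].SetRandom()
	}
	commitment, pok, err := key.Commit(values)
	if err != nil {
		panic(err)
	}
	fmt.Println(key.VerifyKnowledgeProof(commitment, pok) == nil) // expected: true
}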
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by consensys/gnark-crypto DO NOT EDIT - -package permutation - -import ( - "crypto/sha256" - "errors" - "math/big" - "math/bits" - - "github.com/consensys/gnark-crypto/ecc/bls12-381" - "github.com/consensys/gnark-crypto/ecc/bls12-381/fr" - "github.com/consensys/gnark-crypto/ecc/bls12-381/fr/fft" - "github.com/consensys/gnark-crypto/ecc/bls12-381/fr/kzg" - fiatshamir "github.com/consensys/gnark-crypto/fiat-shamir" -) - -var ( - ErrIncompatibleSize = errors.New("t1 and t2 should be of the same size") - ErrSize = errors.New("t1 and t2 should be of size a power of 2") - ErrPermutationProof = errors.New("permutation proof verification failed") - ErrGenerator = errors.New("wrong generator") -) - -// Proof proof that the commitments of t1 and t2 come from -// the same vector but permuted. -type Proof struct { - - // size of the polynomials - size int - - // generator of the fft domain, used for shifting the evaluation point - g fr.Element - - // commitments of t1 & t2, the permuted vectors, and z, the accumulation - // polynomial - t1, t2, z kzg.Digest - - // commitment to the quotient polynomial - q kzg.Digest - - // opening proofs of t1, t2, z, q (in that order) - batchedProof kzg.BatchOpeningProof - - // shifted opening proof of z - shiftedProof kzg.OpeningProof -} - -// evaluateAccumulationPolynomialBitReversed returns the accumulation polynomial in Lagrange basis. -func evaluateAccumulationPolynomialBitReversed(lt1, lt2 []fr.Element, epsilon fr.Element) []fr.Element { - - s := len(lt1) - z := make([]fr.Element, s) - d := make([]fr.Element, s) - z[0].SetOne() - d[0].SetOne() - nn := uint64(64 - bits.TrailingZeros64(uint64(s))) - var t fr.Element - for i := 0; i < s-1; i++ { - _i := int(bits.Reverse64(uint64(i)) >> nn) - _ii := int(bits.Reverse64(uint64((i+1)%s)) >> nn) - z[_ii].Mul(&z[_i], t.Sub(&epsilon, <1[i])) - d[i+1].Mul(&d[i], t.Sub(&epsilon, <2[i])) - } - d = fr.BatchInvert(d) - for i := 0; i < s-1; i++ { - _ii := int(bits.Reverse64(uint64((i+1)%s)) >> nn) - z[_ii].Mul(&z[_ii], &d[i+1]) - } - - return z -} - -// evaluateFirstPartNumReverse computes lt2*z(gx) - lt1*z -func evaluateFirstPartNumReverse(lt1, lt2, lz []fr.Element, epsilon fr.Element) []fr.Element { - - s := len(lt1) - res := make([]fr.Element, s) - var a, b fr.Element - nn := uint64(64 - bits.TrailingZeros64(uint64(s))) - for i := 0; i < s; i++ { - _i := int(bits.Reverse64(uint64(i)) >> nn) - _ii := int(bits.Reverse64(uint64((i+1)%s)) >> nn) - a.Sub(&epsilon, <2[_i]) - a.Mul(&lz[_ii], &a) - b.Sub(&epsilon, <1[_i]) - b.Mul(&lz[_i], &b) - res[_i].Sub(&a, &b) - } - return res -} - -// evaluateSecondPartNumReverse computes L0 * (z-1) -func evaluateSecondPartNumReverse(lz []fr.Element, d *fft.Domain) []fr.Element { - - var tn, o, g fr.Element - o.SetOne() - tn.Exp(d.FrMultiplicativeGen, big.NewInt(int64(d.Cardinality))). 
- Sub(&tn, &o) - s := len(lz) - u := make([]fr.Element, s) - g.Set(&d.FrMultiplicativeGen) - for i := 0; i < s; i++ { - u[i].Sub(&g, &o) - g.Mul(&g, &d.Generator) - } - u = fr.BatchInvert(u) - res := make([]fr.Element, s) - nn := uint64(64 - bits.TrailingZeros64(uint64(s))) - for i := 0; i < s; i++ { - _i := int(bits.Reverse64(uint64(i)) >> nn) - res[_i].Sub(&lz[_i], &o). - Mul(&res[_i], &u[i]). - Mul(&res[_i], &tn) - } - return res -} - -// Prove generates a proof that t1 and t2 are the same but permuted. -// The size of t1 and t2 should be the same and a power of 2. -func Prove(srs *kzg.SRS, t1, t2 []fr.Element) (Proof, error) { - - // res - var proof Proof - var err error - - // size checking - if len(t1) != len(t2) { - return proof, ErrIncompatibleSize - } - - // create the domains - d := fft.NewDomain(uint64(len(t1))) - if d.Cardinality != uint64(len(t1)) { - return proof, ErrSize - } - s := int(d.Cardinality) - proof.size = s - proof.g.Set(&d.Generator) - - // hash function for Fiat Shamir - hFunc := sha256.New() - - // transcript to derive the challenge - fs := fiatshamir.NewTranscript(hFunc, "epsilon", "omega", "eta") - - // commit t1, t2 - ct1 := make([]fr.Element, s) - ct2 := make([]fr.Element, s) - copy(ct1, t1) - copy(ct2, t2) - d.FFTInverse(ct1, fft.DIF) - d.FFTInverse(ct2, fft.DIF) - fft.BitReverse(ct1) - fft.BitReverse(ct2) - proof.t1, err = kzg.Commit(ct1, srs) - if err != nil { - return proof, err - } - proof.t2, err = kzg.Commit(ct2, srs) - if err != nil { - return proof, err - } - - // derive challenge for z - epsilon, err := deriveRandomness(&fs, "epsilon", &proof.t1, &proof.t2) - if err != nil { - return proof, err - } - - // compute Z and commit it - cz := evaluateAccumulationPolynomialBitReversed(t1, t2, epsilon) - d.FFTInverse(cz, fft.DIT) - proof.z, err = kzg.Commit(cz, srs) - if err != nil { - return proof, err - } - lz := make([]fr.Element, s) - copy(lz, cz) - d.FFT(lz, fft.DIF, true) - - // compute the first part of the numerator - lt1 := make([]fr.Element, s) - lt2 := make([]fr.Element, s) - copy(lt1, ct1) - copy(lt2, ct2) - d.FFT(lt1, fft.DIF, true) - d.FFT(lt2, fft.DIF, true) - lsNumFirstPart := evaluateFirstPartNumReverse(lt1, lt2, lz, epsilon) - - // compute second part of the numerator - lsNum := evaluateSecondPartNumReverse(lz, d) - - // derive challenge used for the folding - omega, err := deriveRandomness(&fs, "omega", &proof.z) - if err != nil { - return proof, err - } - - // fold the numerator and divide it by x^n-1 - var t, one fr.Element - one.SetOne() - t.Exp(d.FrMultiplicativeGen, big.NewInt(int64(d.Cardinality))).Sub(&t, &one).Inverse(&t) - for i := 0; i < s; i++ { - lsNum[i].Mul(&omega, &lsNum[i]). - Add(&lsNum[i], &lsNumFirstPart[i]). 
- Mul(&lsNum[i], &t) - } - - // get the quotient and commit it - d.FFTInverse(lsNum, fft.DIT, true) - proof.q, err = kzg.Commit(lsNum, srs) - if err != nil { - return proof, err - } - - // derive the evaluation challenge - eta, err := deriveRandomness(&fs, "eta", &proof.q) - if err != nil { - return proof, err - } - - // compute the opening proofs - proof.batchedProof, err = kzg.BatchOpenSinglePoint( - [][]fr.Element{ - ct1, - ct2, - cz, - lsNum, - }, - []kzg.Digest{ - proof.t1, - proof.t2, - proof.z, - proof.q, - }, - eta, - hFunc, - srs, - ) - if err != nil { - return proof, err - } - - var shiftedEta fr.Element - shiftedEta.Mul(&eta, &d.Generator) - proof.shiftedProof, err = kzg.Open( - cz, - shiftedEta, - srs, - ) - if err != nil { - return proof, err - } - - // done - return proof, nil - -} - -// Verify verifies a permutation proof. -func Verify(srs *kzg.SRS, proof Proof) error { - - // hash function that is used for Fiat Shamir - hFunc := sha256.New() - - // transcript to derive the challenge - fs := fiatshamir.NewTranscript(hFunc, "epsilon", "omega", "eta") - - // derive the challenges - epsilon, err := deriveRandomness(&fs, "epsilon", &proof.t1, &proof.t2) - if err != nil { - return err - } - - omega, err := deriveRandomness(&fs, "omega", &proof.z) - if err != nil { - return err - } - - eta, err := deriveRandomness(&fs, "eta", &proof.q) - if err != nil { - return err - } - - // check the relation - bs := big.NewInt(int64(proof.size)) - var l0, a, b, one, rhs, lhs fr.Element - one.SetOne() - rhs.Exp(eta, bs). - Sub(&rhs, &one) - a.Sub(&eta, &one) - l0.Div(&rhs, &a) - rhs.Mul(&rhs, &proof.batchedProof.ClaimedValues[3]) - a.Sub(&epsilon, &proof.batchedProof.ClaimedValues[1]). - Mul(&a, &proof.shiftedProof.ClaimedValue) - b.Sub(&epsilon, &proof.batchedProof.ClaimedValues[0]). - Mul(&b, &proof.batchedProof.ClaimedValues[2]) - lhs.Sub(&a, &b) - a.Sub(&proof.batchedProof.ClaimedValues[2], &one). - Mul(&a, &l0). - Mul(&a, &omega) - lhs.Add(&a, &lhs) - if !lhs.Equal(&rhs) { - return ErrPermutationProof - } - - // check the opening proofs - err = kzg.BatchVerifySinglePoint( - []kzg.Digest{ - proof.t1, - proof.t2, - proof.z, - proof.q, - }, - &proof.batchedProof, - eta, - hFunc, - srs, - ) - if err != nil { - return err - } - - var shiftedEta fr.Element - shiftedEta.Mul(&eta, &proof.g) - err = kzg.Verify(&proof.z, &proof.shiftedProof, shiftedEta, srs) - if err != nil { - return err - } - - // check the generator is correct - var checkOrder fr.Element - checkOrder.Exp(proof.g, big.NewInt(int64(proof.size/2))) - if checkOrder.Equal(&one) { - return ErrGenerator - } - checkOrder.Square(&checkOrder) - if !checkOrder.Equal(&one) { - return ErrGenerator - } - - return nil -} - -// TODO put that in fiat-shamir package -func deriveRandomness(fs *fiatshamir.Transcript, challenge string, points ...*bls12381.G1Affine) (fr.Element, error) { - - var buf [bls12381.SizeOfG1AffineUncompressed]byte - var r fr.Element - - for _, p := range points { - buf = p.RawBytes() - if err := fs.Bind(challenge, buf[:]); err != nil { - return r, err - } - } - - b, err := fs.ComputeChallenge(challenge) - if err != nil { - return r, err - } - r.SetBytes(b) - return r, nil -} diff --git a/tools/gnark/bls12_381/fr/permutation/permutation_test.go b/tools/gnark/bls12_381/fr/permutation/permutation_test.go deleted file mode 100644 index e706653d..00000000 --- a/tools/gnark/bls12_381/fr/permutation/permutation_test.go +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright 2020 ConsenSys Software Inc. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by consensys/gnark-crypto DO NOT EDIT - -package permutation - -import ( - "math/big" - "testing" - - "github.com/consensys/gnark-crypto/ecc/bls12-381/fr" - "github.com/consensys/gnark-crypto/ecc/bls12-381/fr/kzg" -) - -func TestProof(t *testing.T) { - - srs, err := kzg.NewSRS(64, big.NewInt(13)) - if err != nil { - t.Fatal(err) - } - - a := make([]fr.Element, 8) - b := make([]fr.Element, 8) - - for i := 0; i < 8; i++ { - a[i].SetUint64(uint64(4*i + 1)) - } - for i := 0; i < 8; i++ { - b[i].Set(&a[(5*i)%8]) - } - - // correct proof - { - proof, err := Prove(srs, a, b) - if err != nil { - t.Fatal(err) - } - - err = Verify(srs, proof) - if err != nil { - t.Fatal(err) - } - } - - // wrong proof - { - a[0].SetRandom() - proof, err := Prove(srs, a, b) - if err != nil { - t.Fatal(err) - } - - err = Verify(srs, proof) - if err == nil { - t.Fatal(err) - } - } - -} - -func BenchmarkProver(b *testing.B) { - - srsSize := 1 << 15 - polySize := 1 << 14 - - srs, _ := kzg.NewSRS(uint64(srsSize), big.NewInt(13)) - a := make([]fr.Element, polySize) - c := make([]fr.Element, polySize) - - for i := 0; i < polySize; i++ { - a[i].SetUint64(uint64(i)) - } - for i := 0; i < polySize; i++ { - c[i].Set(&a[(5*i)%(polySize)]) - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - Prove(srs, a, c) - } - -} diff --git a/tools/gnark/bls12_381/fr/plookup/doc.go b/tools/gnark/bls12_381/fr/plookup/doc.go deleted file mode 100644 index ec4b9128..00000000 --- a/tools/gnark/bls12_381/fr/plookup/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2020 ConsenSys Software Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by consensys/gnark-crypto DO NOT EDIT - -// Package plookup provides an API to build plookup proofs. -package plookup diff --git a/tools/gnark/bls12_381/fr/plookup/plookup_test.go b/tools/gnark/bls12_381/fr/plookup/plookup_test.go deleted file mode 100644 index c0f85b4d..00000000 --- a/tools/gnark/bls12_381/fr/plookup/plookup_test.go +++ /dev/null @@ -1,139 +0,0 @@ -// Copyright 2020 ConsenSys Software Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by consensys/gnark-crypto DO NOT EDIT - -package plookup - -import ( - "math/big" - "testing" - - "github.com/consensys/gnark-crypto/ecc/bls12-381/fr/kzg" -) - -func TestLookupVector(t *testing.T) { - - lookupVector := make(Table, 8) - fvector := make(Table, 7) - for i := 0; i < 8; i++ { - lookupVector[i].SetUint64(uint64(2 * i)) - } - for i := 0; i < 7; i++ { - fvector[i].Set(&lookupVector[(4*i+1)%8]) - } - - srs, err := kzg.NewSRS(64, big.NewInt(13)) - if err != nil { - t.Fatal(err) - } - - // correct proof vector - { - proof, err := ProveLookupVector(srs, fvector, lookupVector) - if err != nil { - t.Fatal(err) - } - - err = VerifyLookupVector(srs, proof) - if err != nil { - t.Fatal(err) - } - } - - // wrong proofs vector - { - fvector[0].SetRandom() - - proof, err := ProveLookupVector(srs, fvector, lookupVector) - if err != nil { - t.Fatal(err) - } - - err = VerifyLookupVector(srs, proof) - if err == nil { - t.Fatal(err) - } - } - -} - -func TestLookupTable(t *testing.T) { - - srs, err := kzg.NewSRS(64, big.NewInt(13)) - if err != nil { - t.Fatal(err) - } - - lookupTable := make([]Table, 3) - fTable := make([]Table, 3) - for i := 0; i < 3; i++ { - lookupTable[i] = make(Table, 8) - fTable[i] = make(Table, 7) - for j := 0; j < 8; j++ { - lookupTable[i][j].SetUint64(uint64(2*i + j)) - } - for j := 0; j < 7; j++ { - fTable[i][j].Set(&lookupTable[i][(4*j+1)%8]) - } - } - - // correct proof - { - proof, err := ProveLookupTables(srs, fTable, lookupTable) - if err != nil { - t.Fatal(err) - } - - err = VerifyLookupTables(srs, proof) - if err != nil { - t.Fatal(err) - } - } - - // wrong proof - { - fTable[0][0].SetRandom() - proof, err := ProveLookupTables(srs, fTable, lookupTable) - if err != nil { - t.Fatal(err) - } - - err = VerifyLookupTables(srs, proof) - if err == nil { - t.Fatal(err) - } - } - -} - -func BenchmarkPlookup(b *testing.B) { - - srsSize := 1 << 15 - polySize := 1 << 14 - - srs, _ := kzg.NewSRS(uint64(srsSize), big.NewInt(13)) - a := make(Table, polySize) - c := make(Table, polySize) - - for i := 0; i < 1<<14; i++ { - a[i].SetUint64(uint64(i)) - c[i].SetUint64(uint64((8 * i) % polySize)) - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - ProveLookupVector(srs, a, c) - } -} diff --git a/tools/gnark/bls12_381/fr/plookup/table.go b/tools/gnark/bls12_381/fr/plookup/table.go deleted file mode 100644 index 8593e30c..00000000 --- a/tools/gnark/bls12_381/fr/plookup/table.go +++ /dev/null @@ -1,252 +0,0 @@ -// Copyright 2020 ConsenSys Software Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -// Code generated by consensys/gnark-crypto DO NOT EDIT - -package plookup - -import ( - "crypto/sha256" - "errors" - "math/big" - "sort" - - bls12381 "github.com/consensys/gnark-crypto/ecc/bls12-381" - "github.com/consensys/gnark-crypto/ecc/bls12-381/fr" - "github.com/consensys/gnark-crypto/ecc/bls12-381/fr/fft" - "github.com/consensys/gnark-crypto/ecc/bls12-381/fr/kzg" - "github.com/consensys/gnark-crypto/ecc/bls12-381/fr/permutation" - fiatshamir "github.com/consensys/gnark-crypto/fiat-shamir" -) - -var ( - ErrIncompatibleSize = errors.New("the tables in f and t are not of the same size") - ErrFoldedCommitment = errors.New("the folded commitment is malformed") - ErrNumberDigests = errors.New("proof.ts and proof.fs are not of the same length") -) - -// ProofLookupTables proofs that a list of tables -type ProofLookupTables struct { - - // commitments to the rows f - fs []kzg.Digest - - // commitments to the rows of t - ts []kzg.Digest - - // lookup proof for the f and t folded - foldedProof ProofLookupVector - - // proof that the ts folded correspond to t in the folded proof - permutationProof permutation.Proof -} - -// ProveLookupTables generates a proof that f, seen as a multi dimensional table, -// consists of vectors that are in t. In other words for each i, f[:][i] must be one -// of the t[:][j]. -// -// For instance, if t is the truth table of the XOR function, t will be populated such -// that t[:][i] contains the i-th entry of the truth table, so t[0][i] XOR t[1][i] = t[2][i]. -// -// The Table in f and t are supposed to be of the same size constant size. -func ProveLookupTables(srs *kzg.SRS, f, t []Table) (ProofLookupTables, error) { - - // res - proof := ProofLookupTables{} - var err error - - // hash function used for Fiat Shamir - hFunc := sha256.New() - - // transcript to derive the challenge - fs := fiatshamir.NewTranscript(hFunc, "lambda") - - // check the sizes - if len(f) != len(t) { - return proof, ErrIncompatibleSize - } - s := len(f[0]) - for i := 1; i < len(f); i++ { - if len(f[i]) != s { - return proof, ErrIncompatibleSize - } - } - s = len(t[0]) - for i := 1; i < len(t); i++ { - if len(t[i]) != s { - return proof, ErrIncompatibleSize - } - } - - // commit to the tables in f and t - nbRows := len(t) - proof.fs = make([]kzg.Digest, nbRows) - proof.ts = make([]kzg.Digest, nbRows) - _nbColumns := len(f[0]) + 1 - if _nbColumns < len(t[0]) { - _nbColumns = len(t[0]) - } - d := fft.NewDomain(uint64(_nbColumns)) - nbColumns := d.Cardinality - lfs := make([][]fr.Element, nbRows) - cfs := make([][]fr.Element, nbRows) - lts := make([][]fr.Element, nbRows) - cts := make([][]fr.Element, nbRows) - - for i := 0; i < nbRows; i++ { - - cfs[i] = make([]fr.Element, nbColumns) - lfs[i] = make([]fr.Element, nbColumns) - copy(cfs[i], f[i]) - copy(lfs[i], f[i]) - for j := len(f[i]); j < int(nbColumns); j++ { - cfs[i][j] = f[i][len(f[i])-1] - lfs[i][j] = f[i][len(f[i])-1] - } - d.FFTInverse(cfs[i], fft.DIF) - fft.BitReverse(cfs[i]) - proof.fs[i], err = kzg.Commit(cfs[i], srs) - if err != nil { - return proof, err - } - - cts[i] = make([]fr.Element, nbColumns) - lts[i] = make([]fr.Element, nbColumns) - copy(cts[i], t[i]) - copy(lts[i], t[i]) - for j := len(t[i]); j < int(d.Cardinality); j++ { - cts[i][j] = t[i][len(t[i])-1] - lts[i][j] = t[i][len(t[i])-1] - } - d.FFTInverse(cts[i], fft.DIF) - fft.BitReverse(cts[i]) - proof.ts[i], err = kzg.Commit(cts[i], srs) - if err != nil { - return proof, err - } - } - - // fold f and t - comms := make([]*kzg.Digest, 2*nbRows) - for i := 0; i < 
nbRows; i++ { - comms[i] = new(kzg.Digest) - comms[i].Set(&proof.fs[i]) - comms[nbRows+i] = new(kzg.Digest) - comms[nbRows+i].Set(&proof.ts[i]) - } - lambda, err := deriveRandomness(&fs, "lambda", comms...) - if err != nil { - return proof, err - } - foldedf := make(Table, nbColumns) - foldedt := make(Table, nbColumns) - for i := 0; i < int(nbColumns); i++ { - for j := nbRows - 1; j >= 0; j-- { - foldedf[i].Mul(&foldedf[i], &lambda). - Add(&foldedf[i], &lfs[j][i]) - foldedt[i].Mul(&foldedt[i], &lambda). - Add(&foldedt[i], <s[j][i]) - } - } - - // generate a proof of permutation of the foldedt and sort(foldedt) - foldedtSorted := make(Table, nbColumns) - copy(foldedtSorted, foldedt) - sort.Sort(foldedtSorted) - proof.permutationProof, err = permutation.Prove(srs, foldedt, foldedtSorted) - if err != nil { - return proof, err - } - - // call plookupVector, on foldedf[:len(foldedf)-1] to ensure that the domain size - // in ProveLookupVector is the same as d's - proof.foldedProof, err = ProveLookupVector(srs, foldedf[:len(foldedf)-1], foldedt) - - return proof, err -} - -// VerifyLookupTables verifies that a ProofLookupTables proof is correct. -func VerifyLookupTables(srs *kzg.SRS, proof ProofLookupTables) error { - - // hash function used for Fiat Shamir - hFunc := sha256.New() - - // transcript to derive the challenge - fs := fiatshamir.NewTranscript(hFunc, "lambda") - - // check that the number of digests is the same - if len(proof.fs) != len(proof.ts) { - return ErrNumberDigests - } - - // fold the commitments fs and ts - nbRows := len(proof.fs) - comms := make([]*kzg.Digest, 2*nbRows) - for i := 0; i < nbRows; i++ { - comms[i] = &proof.fs[i] - comms[i+nbRows] = &proof.ts[i] - } - lambda, err := deriveRandomness(&fs, "lambda", comms...) - if err != nil { - return err - } - - // fold the commitments of the rows of t and f - var comf, comt kzg.Digest - comf.Set(&proof.fs[nbRows-1]) - comt.Set(&proof.ts[nbRows-1]) - var blambda big.Int - lambda.ToBigIntRegular(&blambda) - for i := nbRows - 2; i >= 0; i-- { - comf.ScalarMultiplication(&comf, &blambda). - Add(&comf, &proof.fs[i]) - comt.ScalarMultiplication(&comt, &blambda). - Add(&comt, &proof.ts[i]) - } - - // check that the folded commitment of the fs correspond to foldedProof.f - if !comf.Equal(&proof.foldedProof.f) { - return ErrFoldedCommitment - } - - // check that the folded commitment of the ts is a permutation of proof.FoldedProof.t - err = permutation.Verify(srs, proof.permutationProof) - if err != nil { - return err - } - - // verify the inner proof - return VerifyLookupVector(srs, proof.foldedProof) -} - -// TODO put that in fiat-shamir package -func deriveRandomness(fs *fiatshamir.Transcript, challenge string, points ...*bls12381.G1Affine) (fr.Element, error) { - - var buf [bls12381.SizeOfG1AffineUncompressed]byte - var r fr.Element - - for _, p := range points { - buf = p.RawBytes() - if err := fs.Bind(challenge, buf[:]); err != nil { - return r, err - } - } - - b, err := fs.ComputeChallenge(challenge) - if err != nil { - return r, err - } - r.SetBytes(b) - return r, nil -} diff --git a/tools/gnark/bls12_381/fr/plookup/vector.go b/tools/gnark/bls12_381/fr/plookup/vector.go deleted file mode 100644 index 07cc4465..00000000 --- a/tools/gnark/bls12_381/fr/plookup/vector.go +++ /dev/null @@ -1,735 +0,0 @@ -// Copyright 2020 ConsenSys Software Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
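The doc comment of the removed ProveLookupTables uses a truth table as its running example; the sketch below spells that out for one-bit XOR, reusing the toy kzg SRS size from the removed tests. Import paths are placeholders and the column mapping for f is arbitrary, chosen only so that every column of f is a row-wise copy of some column of t.

package main

import (
	"fmt"
	"math/big"

	kzg "example.com/tools/gnark/bls12_381/fr/kzg"         // placeholder import path
	plookup "example.com/tools/gnark/bls12_381/fr/plookup" // placeholder import path
)

func main() {
	srs, err := kzg.NewSRS(64, big.NewInt(13)) // toy SRS, as in the removed tests
	if err != nil {
		panic(err)
	}

	// t is the truth table of one-bit XOR: t[0][i] XOR t[1][i] = t[2][i].
	t := make([]plookup.Table, 3)
	f := make([]plookup.Table, 3)
	for r := range t {
		t[r] = make(plookup.Table, 4)
		f[r] = make(plookup.Table, 4)
	}
	for i := 0; i < 4; i++ {
		a, b := uint64(i)&1, uint64(i)>>1
		t[0][i].SetUint64(a)
		t[1][i].SetUint64(b)
		t[2][i].SetUint64(a ^ b)
	}

	// Each column of f copies some column of t, i.e. every queried triple is a valid XOR.
	for i := 0; i < 4; i++ {
		j := (3 * i) % 4
		for r := 0; r < 3; r++ {
			f[r][i].Set(&t[r][j])
		}
	}

	proof, err := plookup.ProveLookupTables(srs, f, t)
	if err != nil {
		panic(err)
	}
	fmt.Println(plookup.VerifyLookupTables(srs, proof) == nil) // expected: true
}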
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by consensys/gnark-crypto DO NOT EDIT - -package plookup - -import ( - "crypto/sha256" - "errors" - "math/big" - "math/bits" - "sort" - - "github.com/consensys/gnark-crypto/ecc/bls12-381/fr" - "github.com/consensys/gnark-crypto/ecc/bls12-381/fr/fft" - "github.com/consensys/gnark-crypto/ecc/bls12-381/fr/kzg" - fiatshamir "github.com/consensys/gnark-crypto/fiat-shamir" -) - -var ( - ErrNotInTable = errors.New("some value in the vector is not in the lookup table") - ErrPlookupVerification = errors.New("plookup verification failed") - ErrGenerator = errors.New("wrong generator") -) - -type Table []fr.Element - -// Len is the number of elements in the collection. -func (t Table) Len() int { - return len(t) -} - -// Less reports whether the element with -// index i should sort before the element with index j. -func (t Table) Less(i, j int) bool { - return t[i].Cmp(&t[j]) == -1 -} - -// Swap swaps the elements with indexes i and j. -func (t Table) Swap(i, j int) { - t[i], t[j] = t[j], t[i] -} - -// Proof Plookup proof, containing opening proofs -type ProofLookupVector struct { - - // size of the system - size uint64 - - // generator of the fft domain, used for shifting the evaluation point - g fr.Element - - // Commitments to h1, h2, t, z, f, h - h1, h2, t, z, f, h kzg.Digest - - // Batch opening proof of h1, h2, z, t - BatchedProof kzg.BatchOpeningProof - - // Batch opening proof of h1, h2, z shifted by g - BatchedProofShifted kzg.BatchOpeningProof -} - -// evaluateAccumulationPolynomial computes Z, in Lagrange basis. Z is the accumulation of the partial -// ratios of 2 fully split polynomials (cf https://eprint.iacr.org/2020/315.pdf) -// * lf is the list of values that should be in lt -// * lt is the lookup table -// * lh1, lh2 is lf sorted by lt split in 2 overlapping slices -// * beta, gamma are challenges (Schwartz-zippel: they are the random evaluations point) -func evaluateAccumulationPolynomial(lf, lt, lh1, lh2 []fr.Element, beta, gamma fr.Element) []fr.Element { - - z := make([]fr.Element, len(lt)) - - n := len(lt) - d := make([]fr.Element, n-1) - var u, c fr.Element - c.SetOne(). - Add(&c, &beta). - Mul(&c, &gamma) - for i := 0; i < n-1; i++ { - - d[i].Mul(&beta, &lh1[i+1]). - Add(&d[i], &lh1[i]). - Add(&d[i], &c) - - u.Mul(&beta, &lh2[i+1]). - Add(&u, &lh2[i]). - Add(&u, &c) - - d[i].Mul(&d[i], &u) - } - d = fr.BatchInvert(d) - - z[0].SetOne() - var a, b, e fr.Element - e.SetOne().Add(&e, &beta) - for i := 0; i < n-1; i++ { - - a.Add(&gamma, &lf[i]) - - b.Mul(&beta, <[i+1]). - Add(&b, <[i]). - Add(&b, &c) - - a.Mul(&a, &b). - Mul(&a, &e) - - z[i+1].Mul(&z[i], &a). 
- Mul(&z[i+1], &d[i]) - } - - return z -} - -// evaluateNumBitReversed computes the evaluation (shifted, bit reversed) of h where -// h = (x-1)*z*(1+\beta)*(\gamma+f)*(\gamma(1+\beta) + t+ \beta*t(gX)) - -// -// (x-1)*z(gX)*(\gamma(1+\beta) + h_{1} + \beta*h_{1}(gX))*(\gamma(1+\beta) + h_{2} + \beta*h_{2}(gX) ) -// -// * cz, ch1, ch2, ct, cf are the polynomials z, h1, h2, t, f in canonical basis -// * _lz, _lh1, _lh2, _lt, _lf are the polynomials z, h1, h2, t, f in shifted Lagrange basis (domainBig) -// * beta, gamma are the challenges -// * it returns h in canonical basis -func evaluateNumBitReversed(_lz, _lh1, _lh2, _lt, _lf []fr.Element, beta, gamma fr.Element, domainBig *fft.Domain) []fr.Element { - - // result - s := int(domainBig.Cardinality) - num := make([]fr.Element, domainBig.Cardinality) - - var u, onePlusBeta, GammaTimesOnePlusBeta, m, n, one fr.Element - - one.SetOne() - onePlusBeta.Add(&one, &beta) - GammaTimesOnePlusBeta.Mul(&onePlusBeta, &gamma) - - g := make([]fr.Element, s) - g[0].Set(&domainBig.FrMultiplicativeGen) - for i := 1; i < s; i++ { - g[i].Mul(&g[i-1], &domainBig.Generator) - } - - var gg fr.Element - expo := big.NewInt(int64(domainBig.Cardinality>>1 - 1)) - gg.Square(&domainBig.Generator).Exp(gg, expo) - - nn := uint64(64 - bits.TrailingZeros64(domainBig.Cardinality)) - - for i := 0; i < s; i++ { - - _i := int(bits.Reverse64(uint64(i)) >> nn) - _is := int(bits.Reverse64(uint64((i+2)%s)) >> nn) - - // m = z*(1+\beta)*(\gamma+f)*(\gamma(1+\beta) + t+ \beta*t(gX)) - m.Mul(&onePlusBeta, &_lz[_i]) - u.Add(&gamma, &_lf[_i]) - m.Mul(&m, &u) - u.Mul(&beta, &_lt[_is]). - Add(&u, &_lt[_i]). - Add(&u, &GammaTimesOnePlusBeta) - m.Mul(&m, &u) - - // n = z(gX)*(\gamma(1+\beta) + h_{1} + \beta*h_{1}(gX))*(\gamma(1+\beta) + h_{2} + \beta*h_{2}(gX) - n.Mul(&beta, &_lh1[_is]). - Add(&n, &_lh1[_i]). - Add(&n, &GammaTimesOnePlusBeta) - u.Mul(&beta, &_lh2[_is]). - Add(&u, &_lh2[_i]). - Add(&u, &GammaTimesOnePlusBeta) - n.Mul(&n, &u). 
- Mul(&n, &_lz[_is]) - - // (x-gg**(n-1))*(m-n) - num[_i].Sub(&m, &n) - u.Sub(&g[i], &gg) - num[_i].Mul(&num[_i], &u) - - } - - return num -} - -// evaluateXnMinusOneDomainBig returns the evaluation of (x^{n}-1) on FrMultiplicativeGen*< g > -func evaluateXnMinusOneDomainBig(domainBig *fft.Domain) [2]fr.Element { - - sizeDomainSmall := domainBig.Cardinality / 2 - - var one fr.Element - one.SetOne() - - // x^{n}-1 on FrMultiplicativeGen*< g > - var res [2]fr.Element - var shift fr.Element - shift.Exp(domainBig.FrMultiplicativeGen, big.NewInt(int64(sizeDomainSmall))) - res[0].Sub(&shift, &one) - res[1].Add(&shift, &one).Neg(&res[1]) - - return res - -} - -// evaluateL0DomainBig returns the evaluation of (x^{n}-1)/(x-1) on -// x^{n}-1 on FrMultiplicativeGen*< g > -func evaluateL0DomainBig(domainBig *fft.Domain) ([2]fr.Element, []fr.Element) { - - var one fr.Element - one.SetOne() - - // x^{n}-1 on FrMultiplicativeGen*< g > - xnMinusOne := evaluateXnMinusOneDomainBig(domainBig) - - // 1/(x-1) on FrMultiplicativeGen*< g > - var acc fr.Element - denL0 := make([]fr.Element, domainBig.Cardinality) - acc.Set(&domainBig.FrMultiplicativeGen) - for i := 0; i < int(domainBig.Cardinality); i++ { - denL0[i].Sub(&acc, &one) - acc.Mul(&acc, &domainBig.Generator) - } - denL0 = fr.BatchInvert(denL0) - - return xnMinusOne, denL0 -} - -// evaluationLnDomainBig returns the evaluation of (x^{n}-1)/(x-g^{n-1}) on -// x^{n}-1 on FrMultiplicativeGen*< g > -func evaluationLnDomainBig(domainBig *fft.Domain) ([2]fr.Element, []fr.Element) { - - sizeDomainSmall := domainBig.Cardinality / 2 - - var one fr.Element - one.SetOne() - - // x^{n}-1 on FrMultiplicativeGen*< g > - numLn := evaluateXnMinusOneDomainBig(domainBig) - - // 1/(x-g^{n-1}) on FrMultiplicativeGen*< g > - var gg, acc fr.Element - gg.Square(&domainBig.Generator).Exp(gg, big.NewInt(int64(sizeDomainSmall-1))) - denLn := make([]fr.Element, domainBig.Cardinality) - acc.Set(&domainBig.FrMultiplicativeGen) - for i := 0; i < int(domainBig.Cardinality); i++ { - denLn[i].Sub(&acc, &gg) - acc.Mul(&acc, &domainBig.Generator) - } - denLn = fr.BatchInvert(denLn) - - return numLn, denLn - -} - -// evaluateZStartsByOneBitReversed returns l0 * (z-1), in Lagrange basis and bit reversed order -func evaluateZStartsByOneBitReversed(lsZBitReversed []fr.Element, domainBig *fft.Domain) []fr.Element { - - var one fr.Element - one.SetOne() - - res := make([]fr.Element, domainBig.Cardinality) - - nn := uint64(64 - bits.TrailingZeros64(domainBig.Cardinality)) - - xnMinusOne, denL0 := evaluateL0DomainBig(domainBig) - - for i := 0; i < len(lsZBitReversed); i++ { - _i := int(bits.Reverse64(uint64(i)) >> nn) - res[_i].Sub(&lsZBitReversed[_i], &one). - Mul(&res[_i], &xnMinusOne[i%2]). - Mul(&res[_i], &denL0[i]) - } - - return res -} - -// evaluateZEndsByOneBitReversed returns ln * (z-1), in Lagrange basis and bit reversed order -func evaluateZEndsByOneBitReversed(lsZBitReversed []fr.Element, domainBig *fft.Domain) []fr.Element { - - var one fr.Element - one.SetOne() - - numLn, denLn := evaluationLnDomainBig(domainBig) - - res := make([]fr.Element, len(lsZBitReversed)) - nn := uint64(64 - bits.TrailingZeros64(domainBig.Cardinality)) - - for i := 0; i < len(lsZBitReversed); i++ { - _i := int(bits.Reverse64(uint64(i)) >> nn) - res[_i].Sub(&lsZBitReversed[_i], &one). - Mul(&res[_i], &numLn[i%2]). 
- Mul(&res[_i], &denLn[i]) - } - - return res -} - -// evaluateOverlapH1h2BitReversed returns ln * (h1 - h2(g.x)), in Lagrange basis and bit reversed order -func evaluateOverlapH1h2BitReversed(_lh1, _lh2 []fr.Element, domainBig *fft.Domain) []fr.Element { - - var one fr.Element - one.SetOne() - - numLn, denLn := evaluationLnDomainBig(domainBig) - - res := make([]fr.Element, len(_lh1)) - nn := uint64(64 - bits.TrailingZeros64(domainBig.Cardinality)) - - s := len(_lh1) - for i := 0; i < s; i++ { - - _i := int(bits.Reverse64(uint64(i)) >> nn) - _is := int(bits.Reverse64(uint64((i+2)%s)) >> nn) - - res[_i].Sub(&_lh1[_i], &_lh2[_is]). - Mul(&res[_i], &numLn[i%2]). - Mul(&res[_i], &denLn[i]) - } - - return res -} - -// computeQuotientCanonical computes the full quotient of the plookup protocol. -// * alpha is the challenge to fold the numerator -// * lh, lh0, lhn, lh1h2 are the various pieces of the numerator (Lagrange shifted form, bit reversed order) -// * domainBig fft domain -// It returns the quotient, in canonical basis -func computeQuotientCanonical(alpha fr.Element, lh, lh0, lhn, lh1h2 []fr.Element, domainBig *fft.Domain) []fr.Element { - - sizeDomainBig := int(domainBig.Cardinality) - res := make([]fr.Element, sizeDomainBig) - - var one fr.Element - one.SetOne() - - numLn := evaluateXnMinusOneDomainBig(domainBig) - numLn[0].Inverse(&numLn[0]) - numLn[1].Inverse(&numLn[1]) - nn := uint64(64 - bits.TrailingZeros64(domainBig.Cardinality)) - - for i := 0; i < sizeDomainBig; i++ { - - _i := int(bits.Reverse64(uint64(i)) >> nn) - - res[_i].Mul(&lh1h2[_i], &alpha). - Add(&res[_i], &lhn[_i]). - Mul(&res[_i], &alpha). - Add(&res[_i], &lh0[_i]). - Mul(&res[_i], &alpha). - Add(&res[_i], &lh[_i]). - Mul(&res[_i], &numLn[i%2]) - } - - domainBig.FFTInverse(res, fft.DIT, true) - - return res -} - -// ProveLookupVector returns proof that the values in f are in t. -// -// /!\IMPORTANT/!\ -// -// If the table t is already commited somewhere (which is the normal workflow -// before generating a lookup proof), the commitment needs to be done on the -// table sorted. Otherwise the commitment in proof.t will not be the same as -// the public commitment: it will contain the same values, but permuted. 
-func ProveLookupVector(srs *kzg.SRS, f, t Table) (ProofLookupVector, error) { - - // res - var proof ProofLookupVector - var err error - - // hash function used for Fiat Shamir - hFunc := sha256.New() - - // transcript to derive the challenge - fs := fiatshamir.NewTranscript(hFunc, "beta", "gamma", "alpha", "nu") - - // create domains - var domainSmall *fft.Domain - if len(t) <= len(f) { - domainSmall = fft.NewDomain(uint64(len(f) + 1)) - } else { - domainSmall = fft.NewDomain(uint64(len(t))) - } - sizeDomainSmall := int(domainSmall.Cardinality) - - // set the size - proof.size = domainSmall.Cardinality - - // set the generator - proof.g.Set(&domainSmall.Generator) - - // resize f and t - // note: the last element of lf does not matter - lf := make([]fr.Element, sizeDomainSmall) - lt := make([]fr.Element, sizeDomainSmall) - cf := make([]fr.Element, sizeDomainSmall) - ct := make([]fr.Element, sizeDomainSmall) - copy(lt, t) - copy(lf, f) - for i := len(f); i < sizeDomainSmall; i++ { - lf[i] = f[len(f)-1] - } - for i := len(t); i < sizeDomainSmall; i++ { - lt[i] = t[len(t)-1] - } - sort.Sort(Table(lt)) - copy(ct, lt) - copy(cf, lf) - domainSmall.FFTInverse(ct, fft.DIF) - domainSmall.FFTInverse(cf, fft.DIF) - fft.BitReverse(ct) - fft.BitReverse(cf) - proof.t, err = kzg.Commit(ct, srs) - if err != nil { - return proof, err - } - proof.f, err = kzg.Commit(cf, srs) - if err != nil { - return proof, err - } - - // write f sorted by t - lfSortedByt := make(Table, 2*domainSmall.Cardinality-1) - copy(lfSortedByt, lt) - copy(lfSortedByt[domainSmall.Cardinality:], lf) - sort.Sort(lfSortedByt) - - // compute h1, h2, commit to them - lh1 := make([]fr.Element, sizeDomainSmall) - lh2 := make([]fr.Element, sizeDomainSmall) - ch1 := make([]fr.Element, sizeDomainSmall) - ch2 := make([]fr.Element, sizeDomainSmall) - copy(lh1, lfSortedByt[:sizeDomainSmall]) - copy(lh2, lfSortedByt[sizeDomainSmall-1:]) - - copy(ch1, lfSortedByt[:sizeDomainSmall]) - copy(ch2, lfSortedByt[sizeDomainSmall-1:]) - domainSmall.FFTInverse(ch1, fft.DIF) - domainSmall.FFTInverse(ch2, fft.DIF) - fft.BitReverse(ch1) - fft.BitReverse(ch2) - - proof.h1, err = kzg.Commit(ch1, srs) - if err != nil { - return proof, err - } - proof.h2, err = kzg.Commit(ch2, srs) - if err != nil { - return proof, err - } - - // derive beta, gamma - beta, err := deriveRandomness(&fs, "beta", &proof.t, &proof.f, &proof.h1, &proof.h2) - if err != nil { - return proof, err - } - gamma, err := deriveRandomness(&fs, "gamma") - if err != nil { - return proof, err - } - - // Compute to Z - lz := evaluateAccumulationPolynomial(lf, lt, lh1, lh2, beta, gamma) - cz := make([]fr.Element, len(lz)) - copy(cz, lz) - domainSmall.FFTInverse(cz, fft.DIF) - fft.BitReverse(cz) - proof.z, err = kzg.Commit(cz, srs) - if err != nil { - return proof, err - } - - // prepare data for computing the quotient - // compute the numerator - s := domainSmall.Cardinality - domainBig := fft.NewDomain(uint64(2 * s)) - - _lz := make([]fr.Element, 2*s) - _lh1 := make([]fr.Element, 2*s) - _lh2 := make([]fr.Element, 2*s) - _lt := make([]fr.Element, 2*s) - _lf := make([]fr.Element, 2*s) - copy(_lz, cz) - copy(_lh1, ch1) - copy(_lh2, ch2) - copy(_lt, ct) - copy(_lf, cf) - domainBig.FFT(_lz, fft.DIF, true) - domainBig.FFT(_lh1, fft.DIF, true) - domainBig.FFT(_lh2, fft.DIF, true) - domainBig.FFT(_lt, fft.DIF, true) - domainBig.FFT(_lf, fft.DIF, true) - - // compute h - lh := evaluateNumBitReversed(_lz, _lh1, _lh2, _lt, _lf, beta, gamma, domainBig) - - // compute l0*(z-1) - lh0 := 
evaluateZStartsByOneBitReversed(_lz, domainBig) - - // compute ln(z-1) - lhn := evaluateZEndsByOneBitReversed(_lz, domainBig) - - // compute ln*(h1-h2(g*X)) - lh1h2 := evaluateOverlapH1h2BitReversed(_lh1, _lh2, domainBig) - - // compute the quotient - alpha, err := deriveRandomness(&fs, "alpha", &proof.z) - if err != nil { - return proof, err - } - ch := computeQuotientCanonical(alpha, lh, lh0, lhn, lh1h2, domainBig) - proof.h, err = kzg.Commit(ch, srs) - if err != nil { - return proof, err - } - - // build the opening proofs - nu, err := deriveRandomness(&fs, "nu", &proof.h) - if err != nil { - return proof, err - } - proof.BatchedProof, err = kzg.BatchOpenSinglePoint( - [][]fr.Element{ - ch1, - ch2, - ct, - cz, - cf, - ch, - }, - []kzg.Digest{ - proof.h1, - proof.h2, - proof.t, - proof.z, - proof.f, - proof.h, - }, - nu, - hFunc, - srs, - ) - if err != nil { - return proof, err - } - - nu.Mul(&nu, &domainSmall.Generator) - proof.BatchedProofShifted, err = kzg.BatchOpenSinglePoint( - [][]fr.Element{ - ch1, - ch2, - ct, - cz, - }, - []kzg.Digest{ - proof.h1, - proof.h2, - proof.t, - proof.z, - }, - nu, - hFunc, - srs, - ) - if err != nil { - return proof, err - } - - return proof, nil -} - -// VerifyLookupVector verifies that a ProofLookupVector proof is correct -func VerifyLookupVector(srs *kzg.SRS, proof ProofLookupVector) error { - - // hash function that is used for Fiat Shamir - hFunc := sha256.New() - - // transcript to derive the challenge - fs := fiatshamir.NewTranscript(hFunc, "beta", "gamma", "alpha", "nu") - - // derive the various challenges - beta, err := deriveRandomness(&fs, "beta", &proof.t, &proof.f, &proof.h1, &proof.h2) - if err != nil { - return err - } - - gamma, err := deriveRandomness(&fs, "gamma") - if err != nil { - return err - } - - alpha, err := deriveRandomness(&fs, "alpha", &proof.z) - if err != nil { - return err - } - - nu, err := deriveRandomness(&fs, "nu", &proof.h) - if err != nil { - return err - } - - // check opening proofs - err = kzg.BatchVerifySinglePoint( - []kzg.Digest{ - proof.h1, - proof.h2, - proof.t, - proof.z, - proof.f, - proof.h, - }, - &proof.BatchedProof, - nu, - hFunc, - srs, - ) - if err != nil { - return err - } - - // shift the point and verify shifted proof - var shiftedNu fr.Element - shiftedNu.Mul(&nu, &proof.g) - err = kzg.BatchVerifySinglePoint( - []kzg.Digest{ - proof.h1, - proof.h2, - proof.t, - proof.z, - }, - &proof.BatchedProofShifted, - shiftedNu, - hFunc, - srs, - ) - if err != nil { - return err - } - - // check the generator is correct - var checkOrder, one fr.Element - one.SetOne() - checkOrder.Exp(proof.g, big.NewInt(int64(proof.size/2))) - if checkOrder.Equal(&one) { - return ErrGenerator - } - checkOrder.Square(&checkOrder) - if !checkOrder.Equal(&one) { - return ErrGenerator - } - - // check polynomial relation using Schwartz Zippel - var lhs, rhs, nun, g, _g, a, v, w fr.Element - g.Exp(proof.g, big.NewInt(int64(proof.size-1))) - - v.Add(&one, &beta) - w.Mul(&v, &gamma) - - // h(ν) where - // h = (xⁿ⁻¹-1)*z*(1+β)*(γ+f)*(γ(1+β) + t+ β*t(gX)) - - // (xⁿ⁻¹-1)*z(gX)*(γ(1+β) + h₁ + β*h₁(gX))*(γ(1+β) + h₂ + β*h₂(gX) ) - lhs.Sub(&nu, &g). // (ν-gⁿ⁻¹) - Mul(&lhs, &proof.BatchedProof.ClaimedValues[3]). - Mul(&lhs, &v) - a.Add(&gamma, &proof.BatchedProof.ClaimedValues[4]) - lhs.Mul(&lhs, &a) - a.Mul(&beta, &proof.BatchedProofShifted.ClaimedValues[2]). - Add(&a, &proof.BatchedProof.ClaimedValues[2]). - Add(&a, &w) - lhs.Mul(&lhs, &a) - - rhs.Sub(&nu, &g). 
- Mul(&rhs, &proof.BatchedProofShifted.ClaimedValues[3]) - a.Mul(&beta, &proof.BatchedProofShifted.ClaimedValues[0]). - Add(&a, &proof.BatchedProof.ClaimedValues[0]). - Add(&a, &w) - rhs.Mul(&rhs, &a) - a.Mul(&beta, &proof.BatchedProofShifted.ClaimedValues[1]). - Add(&a, &proof.BatchedProof.ClaimedValues[1]). - Add(&a, &w) - rhs.Mul(&rhs, &a) - - lhs.Sub(&lhs, &rhs) - - // check consistancy of bounds - var l0, ln, d1, d2 fr.Element - l0.Exp(nu, big.NewInt(int64(proof.size))).Sub(&l0, &one) - ln.Set(&l0) - d1.Sub(&nu, &one) - d2.Sub(&nu, &g) - l0.Div(&l0, &d1) // (νⁿ-1)/(ν-1) - ln.Div(&ln, &d2) // (νⁿ-1)/(ν-gⁿ⁻¹) - - // l₀*(z-1) = (νⁿ-1)/(ν-1)*(z-1) - var l0z fr.Element - l0z.Sub(&proof.BatchedProof.ClaimedValues[3], &one). - Mul(&l0z, &l0) - - // lₙ*(z-1) = (νⁿ-1)/(ν-gⁿ⁻¹)*(z-1) - var lnz fr.Element - lnz.Sub(&proof.BatchedProof.ClaimedValues[3], &one). - Mul(&ln, &lnz) - - // lₙ*(h1 - h₂(g.x)) - var lnh1h2 fr.Element - lnh1h2.Sub(&proof.BatchedProof.ClaimedValues[0], &proof.BatchedProofShifted.ClaimedValues[1]). - Mul(&lnh1h2, &ln) - - // fold the numerator - lnh1h2.Mul(&lnh1h2, &alpha). - Add(&lnh1h2, &lnz). - Mul(&lnh1h2, &alpha). - Add(&lnh1h2, &l0z). - Mul(&lnh1h2, &alpha). - Add(&lnh1h2, &lhs) - - // (xⁿ-1) * h(x) evaluated at ν - nun.Exp(nu, big.NewInt(int64(proof.size))) - _g.Sub(&nun, &one) - _g.Mul(&proof.BatchedProof.ClaimedValues[5], &_g) - if !lnh1h2.Equal(&_g) { - return ErrPlookupVerification - } - - return nil -} diff --git a/tools/gnark/bls12_381/fr/polynomial/doc.go b/tools/gnark/bls12_381/fr/polynomial/doc.go deleted file mode 100644 index 83479b05..00000000 --- a/tools/gnark/bls12_381/fr/polynomial/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2020 ConsenSys Software Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by consensys/gnark-crypto DO NOT EDIT - -// Package polynomial provides polynomial methods and commitment schemes. -package polynomial diff --git a/tools/gnark/bls12_381/fr/polynomial/multilin.go b/tools/gnark/bls12_381/fr/polynomial/multilin.go deleted file mode 100644 index f668c389..00000000 --- a/tools/gnark/bls12_381/fr/polynomial/multilin.go +++ /dev/null @@ -1,271 +0,0 @@ -// Copyright 2020 ConsenSys Software Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by consensys/gnark-crypto DO NOT EDIT - -package polynomial - -import ( - "github.com/consensys/gnark-crypto/ecc/bls12-381/fr" - "math/bits" -) - -// MultiLin tracks the values of a (dense i.e. 
not sparse) multilinear polynomial -// The variables are X₁ through Xₙ where n = log(len(.)) -// .[∑ᵢ 2ⁱ⁻¹ bₙ₋ᵢ] = the polynomial evaluated at (b₁, b₂, ..., bₙ) -// It is understood that any hypercube evaluation can be extrapolated to a multilinear polynomial -type MultiLin []fr.Element - -// Fold is partial evaluation function k[X₁, X₂, ..., Xₙ] → k[X₂, ..., Xₙ] by setting X₁=r -func (m *MultiLin) Fold(r fr.Element) { - mid := len(*m) / 2 - - bottom, top := (*m)[:mid], (*m)[mid:] - - // updating bookkeeping table - // knowing that the polynomial f ∈ (k[X₂, ..., Xₙ])[X₁] is linear, we would get f(r) = f(0) + r(f(1) - f(0)) - // the following loop computes the evaluations of f(r) accordingly: - // f(r, b₂, ..., bₙ) = f(0, b₂, ..., bₙ) + r(f(1, b₂, ..., bₙ) - f(0, b₂, ..., bₙ)) - for i := 0; i < mid; i++ { - // table[i] ← table[i] + r (table[i + mid] - table[i]) - top[i].Sub(&top[i], &bottom[i]) - top[i].Mul(&top[i], &r) - bottom[i].Add(&bottom[i], &top[i]) - } - - *m = (*m)[:mid] -} - -func (m MultiLin) Sum() fr.Element { - s := m[0] - for i := 1; i < len(m); i++ { - s.Add(&s, &m[i]) - } - return s -} - -func _clone(m MultiLin, p *Pool) MultiLin { - if p == nil { - return m.Clone() - } else { - return p.Clone(m) - } -} - -func _dump(m MultiLin, p *Pool) { - if p != nil { - p.Dump(m) - } -} - -// Evaluate extrapolate the value of the multilinear polynomial corresponding to m -// on the given coordinates -func (m MultiLin) Evaluate(coordinates []fr.Element, p *Pool) fr.Element { - // Folding is a mutating operation - bkCopy := _clone(m, p) - - // Evaluate step by step through repeated folding (i.e. evaluation at the first remaining variable) - for _, r := range coordinates { - bkCopy.Fold(r) - } - - result := bkCopy[0] - - _dump(bkCopy, p) - return result -} - -// Clone creates a deep copy of a bookkeeping table. -// Both multilinear interpolation and sumcheck require folding an underlying -// array, but folding changes the array. To do both one requires a deep copy -// of the bookkeeping table. -func (m MultiLin) Clone() MultiLin { - res := make(MultiLin, len(m)) - copy(res, m) - return res -} - -// Add two bookKeepingTables -func (m *MultiLin) Add(left, right MultiLin) { - size := len(left) - // Check that left and right have the same size - if len(right) != size || len(*m) != size { - panic("left, right and destination must have the right size") - } - - // Add elementwise - for i := 0; i < size; i++ { - (*m)[i].Add(&left[i], &right[i]) - } -} - -// EvalEq computes Eq(q₁, ... , qₙ, h₁, ... , hₙ) = Π₁ⁿ Eq(qᵢ, hᵢ) -// where Eq(x,y) = xy + (1-x)(1-y) = 1 - x - y + xy + xy interpolates -// -// _________________ -// | | | -// | 0 | 1 | -// |_______|_______| -// y | | | -// | 1 | 0 | -// |_______|_______| -// -// x -// -// In other words the polynomial evaluated here is the multilinear extrapolation of -// one that evaluates to q' == h' for vectors q', h' of binary values -func EvalEq(q, h []fr.Element) fr.Element { - var res, nxt, one, sum fr.Element - one.SetOne() - for i := 0; i < len(q); i++ { - nxt.Mul(&q[i], &h[i]) // nxt <- qᵢ * hᵢ - nxt.Double(&nxt) // nxt <- 2 * qᵢ * hᵢ - nxt.Add(&nxt, &one) // nxt <- 1 + 2 * qᵢ * hᵢ - sum.Add(&q[i], &h[i]) // sum <- qᵢ + hᵢ TODO: Why not subtract one by one from nxt? More parallel? 
- - if i == 0 { - res.Sub(&nxt, &sum) // nxt <- 1 + 2 * qᵢ * hᵢ - qᵢ - hᵢ - } else { - nxt.Sub(&nxt, &sum) // nxt <- 1 + 2 * qᵢ * hᵢ - qᵢ - hᵢ - res.Mul(&res, &nxt) // res <- res * nxt - } - } - return res -} - -// Eq sets m to the representation of the polynomial Eq(q₁, ..., qₙ, *, ..., *) × m[0] -func (m *MultiLin) Eq(q []fr.Element) { - n := len(q) - - if len(*m) != 1<= 0; i-- { - res.Mul(&res, v) - res.Add(&res, &(*p)[i]) - } - - return res -} - -// Clone returns a copy of the polynomial -func (p *Polynomial) Clone() Polynomial { - _p := make(Polynomial, len(*p)) - copy(_p, *p) - return _p -} - -// Set to another polynomial -func (p *Polynomial) Set(p1 Polynomial) { - if len(*p) != len(p1) { - *p = p1.Clone() - return - } - - for i := 0; i < len(p1); i++ { - (*p)[i].Set(&p1[i]) - } -} - -// AddConstantInPlace adds a constant to the polynomial, modifying p -func (p *Polynomial) AddConstantInPlace(c *fr.Element) { - for i := 0; i < len(*p); i++ { - (*p)[i].Add(&(*p)[i], c) - } -} - -// SubConstantInPlace subs a constant to the polynomial, modifying p -func (p *Polynomial) SubConstantInPlace(c *fr.Element) { - for i := 0; i < len(*p); i++ { - (*p)[i].Sub(&(*p)[i], c) - } -} - -// ScaleInPlace multiplies p by v, modifying p -func (p *Polynomial) ScaleInPlace(c *fr.Element) { - for i := 0; i < len(*p); i++ { - (*p)[i].Mul(&(*p)[i], c) - } -} - -// Scale multiplies p0 by v, storing the result in p -func (p *Polynomial) Scale(c *fr.Element, p0 Polynomial) { - if len(*p) != len(p0) { - *p = make(Polynomial, len(p0)) - } - for i := 0; i < len(p0); i++ { - (*p)[i].Mul(c, &p0[i]) - } -} - -// Add adds p1 to p2 -// This function allocates a new slice unless p == p1 or p == p2 -func (p *Polynomial) Add(p1, p2 Polynomial) *Polynomial { - - bigger := p1 - smaller := p2 - if len(bigger) < len(smaller) { - bigger, smaller = smaller, bigger - } - - if len(*p) == len(bigger) && (&(*p)[0] == &bigger[0]) { - for i := 0; i < len(smaller); i++ { - (*p)[i].Add(&(*p)[i], &smaller[i]) - } - return p - } - - if len(*p) == len(smaller) && (&(*p)[0] == &smaller[0]) { - for i := 0; i < len(smaller); i++ { - (*p)[i].Add(&(*p)[i], &bigger[i]) - } - *p = append(*p, bigger[len(smaller):]...) 
- return p - } - - res := make(Polynomial, len(bigger)) - copy(res, bigger) - for i := 0; i < len(smaller); i++ { - res[i].Add(&res[i], &smaller[i]) - } - *p = res - return p -} - -// Sub subtracts p2 from p1 -// TODO make interface more consistent with Add -func (p *Polynomial) Sub(p1, p2 Polynomial) *Polynomial { - if len(p1) != len(p2) || len(p2) != len(*p) { - return nil - } - for i := 0; i < len(*p); i++ { - (*p)[i].Sub(&p1[i], &p2[i]) - } - return p -} - -// Equal checks equality between two polynomials -func (p *Polynomial) Equal(p1 Polynomial) bool { - if (*p == nil) != (p1 == nil) { - return false - } - - if len(*p) != len(p1) { - return false - } - - for i := range p1 { - if !(*p)[i].Equal(&p1[i]) { - return false - } - } - - return true -} - -func (p Polynomial) SetZero() { - for i := 0; i < len(p); i++ { - p[i].SetZero() - } -} - -func (p Polynomial) Text(base int) string { - - var builder strings.Builder - - first := true - for d := len(p) - 1; d >= 0; d-- { - if p[d].IsZero() { - continue - } - - pD := p[d] - pDText := pD.Text(base) - - initialLen := builder.Len() - - if pDText[0] == '-' { - pDText = pDText[1:] - if first { - builder.WriteString("-") - } else { - builder.WriteString(" - ") - } - } else if !first { - builder.WriteString(" + ") - } - - first = false - - if !pD.IsOne() || d == 0 { - builder.WriteString(pDText) - } - - if builder.Len()-initialLen > 10 { - builder.WriteString("×") - } - - if d != 0 { - builder.WriteString("X") - } - if d > 1 { - builder.WriteString( - utils.ToSuperscript(strconv.Itoa(d)), - ) - } - - } - - if first { - return "0" - } - - return builder.String() -} diff --git a/tools/gnark/bls12_381/fr/polynomial/polynomial_test.go b/tools/gnark/bls12_381/fr/polynomial/polynomial_test.go deleted file mode 100644 index 5df4aeba..00000000 --- a/tools/gnark/bls12_381/fr/polynomial/polynomial_test.go +++ /dev/null @@ -1,218 +0,0 @@ -// Copyright 2020 ConsenSys Software Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by consensys/gnark-crypto DO NOT EDIT - -package polynomial - -import ( - "github.com/consensys/gnark-crypto/ecc/bls12-381/fr" - "github.com/stretchr/testify/assert" - "math/big" - "testing" -) - -func TestPolynomialEval(t *testing.T) { - - // build polynomial - f := make(Polynomial, 20) - for i := 0; i < 20; i++ { - f[i].SetOne() - } - - // random value - var point fr.Element - point.SetRandom() - - // compute manually f(val) - var expectedEval, one, den fr.Element - var expo big.Int - one.SetOne() - expo.SetUint64(20) - expectedEval.Exp(point, &expo). 
- Sub(&expectedEval, &one) - den.Sub(&point, &one) - expectedEval.Div(&expectedEval, &den) - - // compute purported evaluation - purportedEval := f.Eval(&point) - - // check - if !purportedEval.Equal(&expectedEval) { - t.Fatal("polynomial evaluation failed") - } -} - -func TestPolynomialAddConstantInPlace(t *testing.T) { - - // build polynomial - f := make(Polynomial, 20) - for i := 0; i < 20; i++ { - f[i].SetOne() - } - - // constant to add - var c fr.Element - c.SetRandom() - - // add constant - f.AddConstantInPlace(&c) - - // check - var expectedCoeffs, one fr.Element - one.SetOne() - expectedCoeffs.Add(&one, &c) - for i := 0; i < 20; i++ { - if !f[i].Equal(&expectedCoeffs) { - t.Fatal("AddConstantInPlace failed") - } - } -} - -func TestPolynomialSubConstantInPlace(t *testing.T) { - - // build polynomial - f := make(Polynomial, 20) - for i := 0; i < 20; i++ { - f[i].SetOne() - } - - // constant to sub - var c fr.Element - c.SetRandom() - - // sub constant - f.SubConstantInPlace(&c) - - // check - var expectedCoeffs, one fr.Element - one.SetOne() - expectedCoeffs.Sub(&one, &c) - for i := 0; i < 20; i++ { - if !f[i].Equal(&expectedCoeffs) { - t.Fatal("SubConstantInPlace failed") - } - } -} - -func TestPolynomialScaleInPlace(t *testing.T) { - - // build polynomial - f := make(Polynomial, 20) - for i := 0; i < 20; i++ { - f[i].SetOne() - } - - // constant to scale by - var c fr.Element - c.SetRandom() - - // scale by constant - f.ScaleInPlace(&c) - - // check - for i := 0; i < 20; i++ { - if !f[i].Equal(&c) { - t.Fatal("ScaleInPlace failed") - } - } - -} - -func TestPolynomialAdd(t *testing.T) { - - // build unbalanced polynomials - f1 := make(Polynomial, 20) - f1Backup := make(Polynomial, 20) - for i := 0; i < 20; i++ { - f1[i].SetOne() - f1Backup[i].SetOne() - } - f2 := make(Polynomial, 10) - f2Backup := make(Polynomial, 10) - for i := 0; i < 10; i++ { - f2[i].SetOne() - f2Backup[i].SetOne() - } - - // expected result - var one, two fr.Element - one.SetOne() - two.Double(&one) - expectedSum := make(Polynomial, 20) - for i := 0; i < 10; i++ { - expectedSum[i].Set(&two) - } - for i := 10; i < 20; i++ { - expectedSum[i].Set(&one) - } - - // caller is empty - var g Polynomial - g.Add(f1, f2) - if !g.Equal(expectedSum) { - t.Fatal("add polynomials fails") - } - if !f1.Equal(f1Backup) { - t.Fatal("side effect, f1 should not have been modified") - } - if !f2.Equal(f2Backup) { - t.Fatal("side effect, f2 should not have been modified") - } - - // all operands are distincts - _f1 := f1.Clone() - _f1.Add(f1, f2) - if !_f1.Equal(expectedSum) { - t.Fatal("add polynomials fails") - } - if !f1.Equal(f1Backup) { - t.Fatal("side effect, f1 should not have been modified") - } - if !f2.Equal(f2Backup) { - t.Fatal("side effect, f2 should not have been modified") - } - - // first operand = caller - _f1 = f1.Clone() - _f2 := f2.Clone() - _f1.Add(_f1, _f2) - if !_f1.Equal(expectedSum) { - t.Fatal("add polynomials fails") - } - if !_f2.Equal(f2Backup) { - t.Fatal("side effect, _f2 should not have been modified") - } - - // second operand = caller - _f1 = f1.Clone() - _f2 = f2.Clone() - _f1.Add(_f2, _f1) - if !_f1.Equal(expectedSum) { - t.Fatal("add polynomials fails") - } - if !_f2.Equal(f2Backup) { - t.Fatal("side effect, _f2 should not have been modified") - } -} - -func TestPolynomialText(t *testing.T) { - var one, negTwo fr.Element - one.SetOne() - negTwo.SetInt64(-2) - - p := Polynomial{one, negTwo, one} - - assert.Equal(t, "X² - 2X + 1", p.Text(10)) -} diff --git 
a/tools/gnark/bls12_381/fr/polynomial/pool.go b/tools/gnark/bls12_381/fr/polynomial/pool.go deleted file mode 100644 index 81132603..00000000 --- a/tools/gnark/bls12_381/fr/polynomial/pool.go +++ /dev/null @@ -1,203 +0,0 @@ -// Copyright 2020 ConsenSys Software Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by consensys/gnark-crypto DO NOT EDIT - -package polynomial - -import ( - "encoding/json" - "fmt" - "github.com/consensys/gnark-crypto/ecc/bls12-381/fr" - "reflect" - "runtime" - "sort" - "sync" - "unsafe" -) - -// Memory management for polynomials -// WARNING: This is not thread safe TODO: Make sure that is not a problem -// TODO: There is a lot of "unsafe" memory management here and needs to be vetted thoroughly - -type sizedPool struct { - maxN int - pool sync.Pool - stats poolStats -} - -type inUseData struct { - allocatedFor []uintptr - pool *sizedPool -} - -type Pool struct { - //lock sync.Mutex - inUse map[*fr.Element]inUseData - subPools []sizedPool -} - -func (p *sizedPool) get(n int) *fr.Element { - p.stats.maake(n) - return p.pool.Get().(*fr.Element) -} - -func (p *sizedPool) put(ptr *fr.Element) { - p.stats.dump() - p.pool.Put(ptr) -} - -func NewPool(maxN ...int) (pool Pool) { - - sort.Ints(maxN) - pool = Pool{ - inUse: make(map[*fr.Element]inUseData), - subPools: make([]sizedPool, len(maxN)), - } - - for i := range pool.subPools { - subPool := &pool.subPools[i] - subPool.maxN = maxN[i] - subPool.pool = sync.Pool{ - New: func() interface{} { - subPool.stats.Allocated++ - return getDataPointer(make([]fr.Element, 0, subPool.maxN)) - }, - } - } - return -} - -func (p *Pool) findCorrespondingPool(n int) *sizedPool { - poolI := 0 - for poolI < len(p.subPools) && n > p.subPools[poolI].maxN { - poolI++ - } - return &p.subPools[poolI] // out of bounds error here would mean that n is too large -} - -func (p *Pool) Make(n int) []fr.Element { - pool := p.findCorrespondingPool(n) - ptr := pool.get(n) - p.addInUse(ptr, pool) - return unsafe.Slice(ptr, n) -} - -// Dump dumps a set of polynomials into the pool -func (p *Pool) Dump(slices ...[]fr.Element) { - for _, slice := range slices { - ptr := getDataPointer(slice) - if metadata, ok := p.inUse[ptr]; ok { - delete(p.inUse, ptr) - metadata.pool.put(ptr) - } else { - panic("attempting to dump a slice not created by the pool") - } - } -} - -func (p *Pool) addInUse(ptr *fr.Element, pool *sizedPool) { - pcs := make([]uintptr, 2) - n := runtime.Callers(3, pcs) - - if prevPcs, ok := p.inUse[ptr]; ok { // TODO: remove if unnecessary for security - panic(fmt.Errorf("re-allocated non-dumped slice, previously allocated at %v", runtime.CallersFrames(prevPcs.allocatedFor))) - } - p.inUse[ptr] = inUseData{ - allocatedFor: pcs[:n], - pool: pool, - } -} - -func printFrame(frame runtime.Frame) { - fmt.Printf("\t%s line %d, function %s\n", frame.File, frame.Line, frame.Function) -} - -func (p *Pool) printInUse() { - fmt.Println("slices never dumped allocated at:") - for _, pcs := range p.inUse { - 
fmt.Println("-------------------------") - - var frame runtime.Frame - frames := runtime.CallersFrames(pcs.allocatedFor) - more := true - for more { - frame, more = frames.Next() - printFrame(frame) - } - } -} - -type poolStats struct { - Used int - Allocated int - ReuseRate float64 - InUse int - GreatestNUsed int - SmallestNUsed int -} - -type poolsStats struct { - SubPools []poolStats - InUse int -} - -func (s *poolStats) maake(n int) { - s.Used++ - s.InUse++ - if n > s.GreatestNUsed { - s.GreatestNUsed = n - } - if s.SmallestNUsed == 0 || s.SmallestNUsed > n { - s.SmallestNUsed = n - } -} - -func (s *poolStats) dump() { - s.InUse-- -} - -func (s *poolStats) finalize() { - s.ReuseRate = float64(s.Used) / float64(s.Allocated) -} - -func getDataPointer(slice []fr.Element) *fr.Element { - header := (*reflect.SliceHeader)(unsafe.Pointer(&slice)) - return (*fr.Element)(unsafe.Pointer(header.Data)) -} - -func (p *Pool) PrintPoolStats() { - InUse := 0 - subStats := make([]poolStats, len(p.subPools)) - for i := range p.subPools { - subPool := &p.subPools[i] - subPool.stats.finalize() - subStats[i] = subPool.stats - InUse += subPool.stats.InUse - } - - poolsStats := poolsStats{ - SubPools: subStats, - InUse: InUse, - } - serialized, _ := json.MarshalIndent(poolsStats, "", " ") - fmt.Println(string(serialized)) - p.printInUse() -} - -func (p *Pool) Clone(slice []fr.Element) []fr.Element { - res := p.Make(len(slice)) - copy(res, slice) - return res -} diff --git a/tools/gnark/bls12_381/fr/sumcheck/sumcheck.go b/tools/gnark/bls12_381/fr/sumcheck/sumcheck.go deleted file mode 100644 index a39dc48a..00000000 --- a/tools/gnark/bls12_381/fr/sumcheck/sumcheck.go +++ /dev/null @@ -1,181 +0,0 @@ -// Copyright 2020 ConsenSys Software Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by consensys/gnark-crypto DO NOT EDIT - -package sumcheck - -import ( - "fmt" - "github.com/consensys/gnark-crypto/ecc/bls12-381/fr" - "github.com/consensys/gnark-crypto/ecc/bls12-381/fr/polynomial" - fiatshamir "github.com/consensys/gnark-crypto/fiat-shamir" - "strconv" -) - -// This does not make use of parallelism and represents polynomials as lists of coefficients -// It is currently geared towards arithmetic hashes. Once we have a more unified hash function interface, this can be generified. - -// Claims to a multi-sumcheck statement. i.e. one of the form ∑_{0≤i<2ⁿ} fⱼ(i) = cⱼ for 1 ≤ j ≤ m. -// Later evolving into a claim of the form gⱼ = ∑_{0≤i<2ⁿ⁻ʲ} g(r₁, r₂, ..., rⱼ₋₁, Xⱼ, i...) -type Claims interface { - Combine(a fr.Element) polynomial.Polynomial // Combine into the 0ᵗʰ sumcheck subclaim. Create g := ∑_{1≤j≤m} aʲ⁻¹fⱼ for which now we seek to prove ∑_{0≤i<2ⁿ} g(i) = c := ∑_{1≤j≤m} aʲ⁻¹cⱼ. Return g₁. - Next(fr.Element) polynomial.Polynomial // Return the evaluations gⱼ(k) for 1 ≤ k < degⱼ(g). 
Update the claim to gⱼ₊₁ for the input value as rⱼ - VarsNum() int //number of variables - ClaimsNum() int //number of claims - ProveFinalEval(r []fr.Element) interface{} //in case it is difficult for the verifier to compute g(r₁, ..., rₙ) on its own, the prover can provide the value and a proof -} - -// LazyClaims is the Claims data structure on the verifier side. It is "lazy" in that it has to compute fewer things. -type LazyClaims interface { - ClaimsNum() int // ClaimsNum = m - VarsNum() int // VarsNum = n - CombinedSum(a fr.Element) fr.Element // CombinedSum returns c = ∑_{1≤j≤m} aʲ⁻¹cⱼ - Degree(i int) int //Degree of the total claim in the i'th variable - VerifyFinalEval(r []fr.Element, combinationCoeff fr.Element, purportedValue fr.Element, proof interface{}) error -} - -// Proof of a multi-sumcheck statement. -type Proof struct { - PartialSumPolys []polynomial.Polynomial `json:"partialSumPolys"` - FinalEvalProof interface{} `json:"finalEvalProof"` //in case it is difficult for the verifier to compute g(r₁, ..., rₙ) on its own, the prover can provide the value and a proof -} - -func setupTranscript(claimsNum int, varsNum int, settings *fiatshamir.Settings) (challengeNames []string, err error) { - numChallenges := varsNum - if claimsNum >= 2 { - numChallenges++ - } - challengeNames = make([]string, numChallenges) - if claimsNum >= 2 { - challengeNames[0] = settings.Prefix + "comb" - } - prefix := settings.Prefix + "pSP." - for i := 0; i < varsNum; i++ { - challengeNames[i+numChallenges-varsNum] = prefix + strconv.Itoa(i) - } - if settings.Transcript == nil { - transcript := fiatshamir.NewTranscript(settings.Hash, challengeNames...) - settings.Transcript = &transcript - } - - for i := range settings.BaseChallenges { - if err = settings.Transcript.Bind(challengeNames[0], settings.BaseChallenges[i]); err != nil { - return - } - } - return -} - -func next(transcript *fiatshamir.Transcript, bindings []fr.Element, remainingChallengeNames *[]string) (fr.Element, error) { - challengeName := (*remainingChallengeNames)[0] - for i := range bindings { - bytes := bindings[i].Bytes() - if err := transcript.Bind(challengeName, bytes[:]); err != nil { - return fr.Element{}, err - } - } - var res fr.Element - bytes, err := transcript.ComputeChallenge(challengeName) - res.SetBytes(bytes) - - *remainingChallengeNames = (*remainingChallengeNames)[1:] - - return res, err -} - -// Prove create a non-interactive sumcheck proof -func Prove(claims Claims, transcriptSettings fiatshamir.Settings) (Proof, error) { - - var proof Proof - remainingChallengeNames, err := setupTranscript(claims.ClaimsNum(), claims.VarsNum(), &transcriptSettings) - transcript := transcriptSettings.Transcript - if err != nil { - return proof, err - } - - var combinationCoeff fr.Element - if claims.ClaimsNum() >= 2 { - if combinationCoeff, err = next(transcript, []fr.Element{}, &remainingChallengeNames); err != nil { - return proof, err - } - } - - varsNum := claims.VarsNum() - proof.PartialSumPolys = make([]polynomial.Polynomial, varsNum) - proof.PartialSumPolys[0] = claims.Combine(combinationCoeff) - challenges := make([]fr.Element, varsNum) - - for j := 0; j+1 < varsNum; j++ { - if challenges[j], err = next(transcript, proof.PartialSumPolys[j], &remainingChallengeNames); err != nil { - return proof, err - } - proof.PartialSumPolys[j+1] = claims.Next(challenges[j]) - } - - if challenges[varsNum-1], err = next(transcript, proof.PartialSumPolys[varsNum-1], &remainingChallengeNames); err != nil { - return proof, err - } - - 
proof.FinalEvalProof = claims.ProveFinalEval(challenges) - - return proof, nil -} - -func Verify(claims LazyClaims, proof Proof, transcriptSettings fiatshamir.Settings) error { - remainingChallengeNames, err := setupTranscript(claims.ClaimsNum(), claims.VarsNum(), &transcriptSettings) - transcript := transcriptSettings.Transcript - if err != nil { - return err - } - - var combinationCoeff fr.Element - - if claims.ClaimsNum() >= 2 { - if combinationCoeff, err = next(transcript, []fr.Element{}, &remainingChallengeNames); err != nil { - return err - } - } - - r := make([]fr.Element, claims.VarsNum()) - - // Just so that there is enough room for gJ to be reused - maxDegree := claims.Degree(0) - for j := 1; j < claims.VarsNum(); j++ { - if d := claims.Degree(j); d > maxDegree { - maxDegree = d - } - } - gJ := make(polynomial.Polynomial, maxDegree+1) //At the end of iteration j, gJ = ∑_{i < 2ⁿ⁻ʲ⁻¹} g(X₁, ..., Xⱼ₊₁, i...) NOTE: n is shorthand for claims.VarsNum() - gJR := claims.CombinedSum(combinationCoeff) // At the beginning of iteration j, gJR = ∑_{i < 2ⁿ⁻ʲ} g(r₁, ..., rⱼ, i...) - - for j := 0; j < claims.VarsNum(); j++ { - if len(proof.PartialSumPolys[j]) != claims.Degree(j) { - return fmt.Errorf("malformed proof") - } - copy(gJ[1:], proof.PartialSumPolys[j]) - gJ[0].Sub(&gJR, &proof.PartialSumPolys[j][0]) // Requirement that gⱼ(0) + gⱼ(1) = gⱼ₋₁(r) - // gJ is ready - - //Prepare for the next iteration - if r[j], err = next(transcript, proof.PartialSumPolys[j], &remainingChallengeNames); err != nil { - return err - } - // This is an extremely inefficient way of interpolating. TODO: Interpolate without symbolically computing a polynomial - gJCoeffs := polynomial.InterpolateOnRange(gJ[:(claims.Degree(j) + 1)]) - gJR = gJCoeffs.Eval(&r[j]) - } - - return claims.VerifyFinalEval(r, combinationCoeff, gJR, proof.FinalEvalProof) -} diff --git a/tools/gnark/bls12_381/fr/sumcheck/sumcheck_test.go b/tools/gnark/bls12_381/fr/sumcheck/sumcheck_test.go deleted file mode 100644 index b62b8a91..00000000 --- a/tools/gnark/bls12_381/fr/sumcheck/sumcheck_test.go +++ /dev/null @@ -1,161 +0,0 @@ -// Copyright 2020 ConsenSys Software Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -// Code generated by consensys/gnark-crypto DO NOT EDIT - -package sumcheck - -import ( - "fmt" - "github.com/consensys/gnark-crypto/ecc/bls12-381/fr" - "github.com/consensys/gnark-crypto/ecc/bls12-381/fr/polynomial" - "github.com/consensys/gnark-crypto/ecc/bls12-381/fr/test_vector_utils" - fiatshamir "github.com/consensys/gnark-crypto/fiat-shamir" - "github.com/stretchr/testify/assert" - "hash" - "math/bits" - "strings" - "testing" -) - -type singleMultilinClaim struct { - g polynomial.MultiLin -} - -func (c singleMultilinClaim) ProveFinalEval(r []fr.Element) interface{} { - return nil // verifier can compute the final eval itself -} - -func (c singleMultilinClaim) VarsNum() int { - return bits.TrailingZeros(uint(len(c.g))) -} - -func (c singleMultilinClaim) ClaimsNum() int { - return 1 -} - -func sumForX1One(g polynomial.MultiLin) polynomial.Polynomial { - sum := g[len(g)/2] - for i := len(g)/2 + 1; i < len(g); i++ { - sum.Add(&sum, &g[i]) - } - return []fr.Element{sum} -} - -func (c singleMultilinClaim) Combine(fr.Element) polynomial.Polynomial { - return sumForX1One(c.g) -} - -func (c *singleMultilinClaim) Next(r fr.Element) polynomial.Polynomial { - c.g.Fold(r) - return sumForX1One(c.g) -} - -type singleMultilinLazyClaim struct { - g polynomial.MultiLin - claimedSum fr.Element -} - -func (c singleMultilinLazyClaim) VerifyFinalEval(r []fr.Element, combinationCoeff fr.Element, purportedValue fr.Element, proof interface{}) error { - val := c.g.Evaluate(r, nil) - if val.Equal(&purportedValue) { - return nil - } - return fmt.Errorf("mismatch") -} - -func (c singleMultilinLazyClaim) CombinedSum(combinationCoeffs fr.Element) fr.Element { - return c.claimedSum -} - -func (c singleMultilinLazyClaim) Degree(i int) int { - return 1 -} - -func (c singleMultilinLazyClaim) ClaimsNum() int { - return 1 -} - -func (c singleMultilinLazyClaim) VarsNum() int { - return bits.TrailingZeros(uint(len(c.g))) -} - -func testSumcheckSingleClaimMultilin(polyInt []uint64, hashGenerator func() hash.Hash) error { - poly := make(polynomial.MultiLin, len(polyInt)) - for i, n := range polyInt { - poly[i].SetUint64(n) - } - - claim := singleMultilinClaim{g: poly.Clone()} - - proof, err := Prove(&claim, fiatshamir.WithHash(hashGenerator())) - if err != nil { - return err - } - - var sb strings.Builder - for _, p := range proof.PartialSumPolys { - - sb.WriteString("\t{") - for i := 0; i < len(p); i++ { - sb.WriteString(p[i].String()) - if i+1 < len(p) { - sb.WriteString(", ") - } - } - sb.WriteString("}\n") - } - - lazyClaim := singleMultilinLazyClaim{g: poly, claimedSum: poly.Sum()} - if err = Verify(lazyClaim, proof, fiatshamir.WithHash(hashGenerator())); err != nil { - return err - } - - proof.PartialSumPolys[0][0].Add(&proof.PartialSumPolys[0][0], test_vector_utils.ToElement(1)) - lazyClaim = singleMultilinLazyClaim{g: poly, claimedSum: poly.Sum()} - if Verify(lazyClaim, proof, fiatshamir.WithHash(hashGenerator())) == nil { - return fmt.Errorf("bad proof accepted") - } - return nil -} - -func TestSumcheckDeterministicHashSingleClaimMultilin(t *testing.T) { - //printMsws(36) - - polys := [][]uint64{ - {1, 2, 3, 4}, // 1 + 2X₁ + X₂ - {1, 2, 3, 4, 5, 6, 7, 8}, // 1 + 4X₁ + 2X₂ + X₃ - {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}, // 1 + 8X₁ + 4X₂ + 2X₃ + X₄ - } - - const MaxStep = 4 - const MaxStart = 4 - hashGens := make([]func() hash.Hash, 0, MaxStart*MaxStep) - - for step := 0; step < MaxStep; step++ { - for startState := 0; startState < MaxStart; startState++ { - if step == 0 && startState == 1 { // 
unlucky case where a bad proof would be accepted - continue - } - hashGens = append(hashGens, test_vector_utils.NewMessageCounterGenerator(startState, step)) - } - } - - for _, poly := range polys { - for _, hashGen := range hashGens { - assert.NoError(t, testSumcheckSingleClaimMultilin(poly, hashGen), - "failed with poly %v and hashGen %v", poly, hashGen()) - } - } -} diff --git a/tools/gnark/bls12_381/fr/test_vector_utils/test_vector_utils.go b/tools/gnark/bls12_381/fr/test_vector_utils/test_vector_utils.go deleted file mode 100644 index 4bfd2a79..00000000 --- a/tools/gnark/bls12_381/fr/test_vector_utils/test_vector_utils.go +++ /dev/null @@ -1,429 +0,0 @@ -// Copyright 2020 ConsenSys Software Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by consensys/gnark-crypto DO NOT EDIT - -package test_vector_utils - -import ( - "encoding/json" - "fmt" - "github.com/consensys/gnark-crypto/ecc/bls12-381/fr" - "github.com/consensys/gnark-crypto/ecc/bls12-381/fr/polynomial" - "hash" - - "os" - "path/filepath" - "reflect" - "sort" - "strconv" - "strings" -) - -type ElementTriplet struct { - key1 fr.Element - key2 fr.Element - key2Present bool - value fr.Element - used bool -} - -func (t *ElementTriplet) CmpKey(o *ElementTriplet) int { - if cmp1 := t.key1.Cmp(&o.key1); cmp1 != 0 { - return cmp1 - } - - if t.key2Present { - if o.key2Present { - return t.key2.Cmp(&o.key2) - } - return 1 - } else { - if o.key2Present { - return -1 - } - return 0 - } -} - -var MapCache = make(map[string]*ElementMap) - -func ElementMapFromFile(path string) (*ElementMap, error) { - path, err := filepath.Abs(path) - if err != nil { - return nil, err - } - if h, ok := MapCache[path]; ok { - return h, nil - } - var bytes []byte - if bytes, err = os.ReadFile(path); err == nil { - var asMap map[string]interface{} - if err = json.Unmarshal(bytes, &asMap); err != nil { - return nil, err - } - - var h ElementMap - if h, err = CreateElementMap(asMap); err == nil { - MapCache[path] = &h - } - - return &h, err - - } else { - return nil, err - } -} - -func CreateElementMap(rawMap map[string]interface{}) (ElementMap, error) { - res := make(ElementMap, 0, len(rawMap)) - - for k, v := range rawMap { - var entry ElementTriplet - if _, err := SetElement(&entry.value, v); err != nil { - return nil, err - } - - key := strings.Split(k, ",") - switch len(key) { - case 1: - entry.key2Present = false - case 2: - entry.key2Present = true - if _, err := SetElement(&entry.key2, key[1]); err != nil { - return nil, err - } - default: - return nil, fmt.Errorf("cannot parse %T as one or two field elements", v) - } - if _, err := SetElement(&entry.key1, key[0]); err != nil { - return nil, err - } - - res = append(res, &entry) - } - - res.sort() - return res, nil -} - -type ElementMap []*ElementTriplet - -type MapHash struct { - Map *ElementMap - state fr.Element - stateValid bool -} - -func min(a, b int) int { - if a < b { - return a - } - return b -} - -func (m *MapHash) Write(p []byte) (n int, err error) { - var x 
fr.Element - for i := 0; i < len(p); i += fr.Bytes { - x.SetBytes(p[i:min(len(p), i+fr.Bytes)]) - if err = m.write(x); err != nil { - return - } - } - n = len(p) - return -} - -func (m *MapHash) Sum(b []byte) []byte { - mP := *m - if _, err := mP.Write(b); err != nil { - panic(err) - } - bytes := mP.state.Bytes() - return bytes[:] -} - -func (m *MapHash) Reset() { - m.stateValid = false -} - -func (m *MapHash) Size() int { - return fr.Bytes -} - -func (m *MapHash) BlockSize() int { - return fr.Bytes -} - -func (m *MapHash) write(x fr.Element) error { - X := &x - Y := &m.state - if !m.stateValid { - Y = nil - } - var err error - if m.state, err = m.Map.FindPair(X, Y); err == nil { - m.stateValid = true - } - return err -} - -func (t *ElementTriplet) writeKey(sb *strings.Builder) { - sb.WriteRune('"') - sb.WriteString(t.key1.String()) - if t.key2Present { - sb.WriteRune(',') - sb.WriteString(t.key2.String()) - } - sb.WriteRune('"') -} -func (m *ElementMap) UnusedEntries() []interface{} { - unused := make([]interface{}, 0) - for _, v := range *m { - if !v.used { - var vInterface interface{} - if v.key2Present { - vInterface = []interface{}{ElementToInterface(&v.key1), ElementToInterface(&v.key2)} - } else { - vInterface = ElementToInterface(&v.key1) - } - unused = append(unused, vInterface) - } - } - return unused -} - -func (m *ElementMap) sort() { - sort.Slice(*m, func(i, j int) bool { - return (*m)[i].CmpKey((*m)[j]) <= 0 - }) -} - -func (m *ElementMap) find(toFind *ElementTriplet) (fr.Element, error) { - i := sort.Search(len(*m), func(i int) bool { return (*m)[i].CmpKey(toFind) >= 0 }) - - if i < len(*m) && (*m)[i].CmpKey(toFind) == 0 { - (*m)[i].used = true - return (*m)[i].value, nil - } - var sb strings.Builder - sb.WriteString("no value available for input ") - toFind.writeKey(&sb) - return fr.Element{}, fmt.Errorf(sb.String()) -} - -func (m *ElementMap) FindPair(x *fr.Element, y *fr.Element) (fr.Element, error) { - - toFind := ElementTriplet{ - key1: *x, - key2Present: y != nil, - } - - if y != nil { - toFind.key2 = *y - } - - return m.find(&toFind) -} - -func ToElement(i int64) *fr.Element { - var res fr.Element - res.SetInt64(i) - return &res -} - -type MessageCounter struct { - startState uint64 - state uint64 - step uint64 -} - -func (m *MessageCounter) Write(p []byte) (n int, err error) { - inputBlockSize := (len(p)-1)/fr.Bytes + 1 - m.state += uint64(inputBlockSize) * m.step - return len(p), nil -} - -func (m *MessageCounter) Sum(b []byte) []byte { - inputBlockSize := (len(b)-1)/fr.Bytes + 1 - resI := m.state + uint64(inputBlockSize)*m.step - var res fr.Element - res.SetInt64(int64(resI)) - resBytes := res.Bytes() - return resBytes[:] -} - -func (m *MessageCounter) Reset() { - m.state = m.startState -} - -func (m *MessageCounter) Size() int { - return fr.Bytes -} - -func (m *MessageCounter) BlockSize() int { - return fr.Bytes -} - -func NewMessageCounter(startState, step int) hash.Hash { - transcript := &MessageCounter{startState: uint64(startState), state: uint64(startState), step: uint64(step)} - return transcript -} - -func NewMessageCounterGenerator(startState, step int) func() hash.Hash { - return func() hash.Hash { - return NewMessageCounter(startState, step) - } -} - -type ListHash []fr.Element - -func (h *ListHash) Write(p []byte) (n int, err error) { - return len(p), nil -} - -func (h *ListHash) Sum(b []byte) []byte { - res := (*h)[0].Bytes() - *h = (*h)[1:] - return res[:] -} - -func (h *ListHash) Reset() { -} - -func (h *ListHash) Size() int { - return fr.Bytes -} - 
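The deleted MessageCounter above is a hash.Hash whose digest is simply a counter that advances by step for every field-element-sized block written; this is what makes the Fiat-Shamir transcripts in the sumcheck tests reproducible. Below is a minimal self-contained sketch of the same idea; the type name, the fixed 32-byte block size and the simplified Sum arithmetic are illustrative choices, not taken from the deleted file.

package main

import (
	"encoding/binary"
	"fmt"
	"hash"
)

// counterHash mimics the deleted MessageCounter: it ignores the content of its
// input and only advances an internal counter by step per 32-byte block written.
type counterHash struct {
	start, state, step uint64
}

func (h *counterHash) Write(p []byte) (int, error) {
	blocks := uint64((len(p)-1)/32 + 1)
	h.state += blocks * h.step
	return len(p), nil
}

func (h *counterHash) Sum(b []byte) []byte {
	out := make([]byte, 32)
	binary.BigEndian.PutUint64(out[24:], h.state) // big-endian, like fr.Element.Bytes()
	return append(b, out...)
}

func (h *counterHash) Reset()         { h.state = h.start }
func (h *counterHash) Size() int      { return 32 }
func (h *counterHash) BlockSize() int { return 32 }

var _ hash.Hash = (*counterHash)(nil)

func main() {
	h := &counterHash{step: 1}
	h.Write(make([]byte, 32)) // one block: counter goes 0 -> 1
	h.Write(make([]byte, 64)) // two blocks: counter goes 1 -> 3
	fmt.Printf("digest = %x\n", h.Sum(nil))
}

Two such hashes built with different start or step values produce different challenge streams, which is how the sumcheck test above builds its family of hash generators.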
-func (h *ListHash) BlockSize() int { - return fr.Bytes -} -func SetElement(z *fr.Element, value interface{}) (*fr.Element, error) { - - // TODO: Put this in element.SetString? - switch v := value.(type) { - case string: - - if sep := strings.Split(v, "/"); len(sep) == 2 { - var denom fr.Element - if _, err := z.SetString(sep[0]); err != nil { - return nil, err - } - if _, err := denom.SetString(sep[1]); err != nil { - return nil, err - } - denom.Inverse(&denom) - z.Mul(z, &denom) - return z, nil - } - - case float64: - asInt := int64(v) - if float64(asInt) != v { - return nil, fmt.Errorf("cannot currently parse float") - } - z.SetInt64(asInt) - return z, nil - } - - return z.SetInterface(value) -} - -func SliceToElementSlice[T any](slice []T) ([]fr.Element, error) { - elementSlice := make([]fr.Element, len(slice)) - for i, v := range slice { - if _, err := SetElement(&elementSlice[i], v); err != nil { - return nil, err - } - } - return elementSlice, nil -} - -func SliceEquals(a []fr.Element, b []fr.Element) error { - if len(a) != len(b) { - return fmt.Errorf("length mismatch %d≠%d", len(a), len(b)) - } - for i := range a { - if !a[i].Equal(&b[i]) { - return fmt.Errorf("at index %d: %s ≠ %s", i, a[i].String(), b[i].String()) - } - } - return nil -} - -func SliceSliceEquals(a [][]fr.Element, b [][]fr.Element) error { - if len(a) != len(b) { - return fmt.Errorf("length mismatch %d≠%d", len(a), len(b)) - } - for i := range a { - if err := SliceEquals(a[i], b[i]); err != nil { - return fmt.Errorf("at index %d: %w", i, err) - } - } - return nil -} - -func PolynomialSliceEquals(a []polynomial.Polynomial, b []polynomial.Polynomial) error { - if len(a) != len(b) { - return fmt.Errorf("length mismatch %d≠%d", len(a), len(b)) - } - for i := range a { - if err := SliceEquals(a[i], b[i]); err != nil { - return fmt.Errorf("at index %d: %w", i, err) - } - } - return nil -} - -func ElementToInterface(x *fr.Element) interface{} { - text := x.Text(10) - if len(text) < 10 && !strings.Contains(text, "/") { - if i, err := strconv.Atoi(text); err != nil { - panic(err.Error()) - } else { - return i - } - } - return text -} - -func ElementSliceToInterfaceSlice(x interface{}) []interface{} { - if x == nil { - return nil - } - - X := reflect.ValueOf(x) - - res := make([]interface{}, X.Len()) - for i := range res { - xI := X.Index(i).Interface().(fr.Element) - res[i] = ElementToInterface(&xI) - } - return res -} - -func ElementSliceSliceToInterfaceSliceSlice(x interface{}) [][]interface{} { - if x == nil { - return nil - } - - X := reflect.ValueOf(x) - - res := make([][]interface{}, X.Len()) - for i := range res { - res[i] = ElementSliceToInterfaceSlice(X.Index(i).Interface()) - } - - return res -} diff --git a/tools/gnark/bn254/fr/fft/doc.go b/tools/gnark/bn254/fr/fft/doc.go deleted file mode 100644 index 3c35170e..00000000 --- a/tools/gnark/bn254/fr/fft/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2020 ConsenSys Software Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -// Code generated by consensys/gnark-crypto DO NOT EDIT - -// Package fft provides in-place discrete Fourier transform. -package fft diff --git a/tools/gnark/bn254/fr/fft/domain.go b/tools/gnark/bn254/fr/fft/domain.go deleted file mode 100644 index 3a94fcfa..00000000 --- a/tools/gnark/bn254/fr/fft/domain.go +++ /dev/null @@ -1,240 +0,0 @@ -// Copyright 2020 ConsenSys Software Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by consensys/gnark-crypto DO NOT EDIT - -package fft - -import ( - "fmt" - "io" - "math/big" - "math/bits" - "runtime" - "sync" - - "github.com/consensys/gnark-crypto/ecc/bn254/fr" - - curve "github.com/consensys/gnark-crypto/ecc/bn254" - - "github.com/consensys/gnark-crypto/ecc" -) - -// Domain with a power of 2 cardinality -// compute a field element of order 2x and store it in FinerGenerator -// all other values can be derived from x, GeneratorSqrt -type Domain struct { - Cardinality uint64 - CardinalityInv fr.Element - Generator fr.Element - GeneratorInv fr.Element - FrMultiplicativeGen fr.Element // generator of Fr* - FrMultiplicativeGenInv fr.Element - - // the following slices are not serialized and are (re)computed through domain.preComputeTwiddles() - - // Twiddles factor for the FFT using Generator for each stage of the recursive FFT - Twiddles [][]fr.Element - - // Twiddles factor for the FFT using GeneratorInv for each stage of the recursive FFT - TwiddlesInv [][]fr.Element - - // we precompute these mostly to avoid the memory intensive bit reverse permutation in the groth16.Prover - - // CosetTable u*<1,g,..,g^(n-1)> - CosetTable []fr.Element - CosetTableReversed []fr.Element // optional, this is computed on demand at the creation of the domain - - // CosetTable[i][j] = domain.Generator(i-th)SqrtInv ^ j - CosetTableInv []fr.Element - CosetTableInvReversed []fr.Element // optional, this is computed on demand at the creation of the domain -} - -// NewDomain returns a subgroup with a power of 2 cardinality -// cardinality >= m -func NewDomain(m uint64) *Domain { - - domain := &Domain{} - x := ecc.NextPowerOfTwo(m) - domain.Cardinality = uint64(x) - - // generator of the largest 2-adic subgroup - var rootOfUnity fr.Element - - rootOfUnity.SetString("19103219067921713944291392827692070036145651957329286315305642004821462161904") - const maxOrderRoot uint64 = 28 - domain.FrMultiplicativeGen.SetUint64(5) - - domain.FrMultiplicativeGenInv.Inverse(&domain.FrMultiplicativeGen) - - // find generator for Z/2^(log(m))Z - logx := uint64(bits.TrailingZeros64(x)) - if logx > maxOrderRoot { - panic(fmt.Sprintf("m (%d) is too big: the required root of unity does not exist", m)) - } - - // Generator = FinerGenerator^2 has order x - expo := uint64(1 << (maxOrderRoot - logx)) - domain.Generator.Exp(rootOfUnity, big.NewInt(int64(expo))) // order x - domain.GeneratorInv.Inverse(&domain.Generator) - domain.CardinalityInv.SetUint64(uint64(x)).Inverse(&domain.CardinalityInv) - - // twiddle factors - domain.preComputeTwiddles() - - // store the bit 
reversed coset tables - domain.reverseCosetTables() - - return domain -} - -func (d *Domain) reverseCosetTables() { - d.CosetTableReversed = make([]fr.Element, d.Cardinality) - d.CosetTableInvReversed = make([]fr.Element, d.Cardinality) - copy(d.CosetTableReversed, d.CosetTable) - copy(d.CosetTableInvReversed, d.CosetTableInv) - BitReverse(d.CosetTableReversed) - BitReverse(d.CosetTableInvReversed) -} - -func (d *Domain) preComputeTwiddles() { - - // nb fft stages - nbStages := uint64(bits.TrailingZeros64(d.Cardinality)) - - d.Twiddles = make([][]fr.Element, nbStages) - d.TwiddlesInv = make([][]fr.Element, nbStages) - d.CosetTable = make([]fr.Element, d.Cardinality) - d.CosetTableInv = make([]fr.Element, d.Cardinality) - - var wg sync.WaitGroup - - // for each fft stage, we pre compute the twiddle factors - twiddles := func(t [][]fr.Element, omega fr.Element) { - for i := uint64(0); i < nbStages; i++ { - t[i] = make([]fr.Element, 1+(1<<(nbStages-i-1))) - var w fr.Element - if i == 0 { - w = omega - } else { - w = t[i-1][2] - } - t[i][0] = fr.One() - t[i][1] = w - for j := 2; j < len(t[i]); j++ { - t[i][j].Mul(&t[i][j-1], &w) - } - } - wg.Done() - } - - expTable := func(sqrt fr.Element, t []fr.Element) { - t[0] = fr.One() - precomputeExpTable(sqrt, t) - wg.Done() - } - - wg.Add(4) - go twiddles(d.Twiddles, d.Generator) - go twiddles(d.TwiddlesInv, d.GeneratorInv) - go expTable(d.FrMultiplicativeGen, d.CosetTable) - go expTable(d.FrMultiplicativeGenInv, d.CosetTableInv) - - wg.Wait() - -} - -func precomputeExpTable(w fr.Element, table []fr.Element) { - n := len(table) - - // see if it makes sense to parallelize exp tables pre-computation - interval := 0 - if runtime.NumCPU() >= 4 { - interval = (n - 1) / (runtime.NumCPU() / 4) - } - - // this ratio roughly correspond to the number of multiplication one can do in place of a Exp operation - const ratioExpMul = 6000 / 17 - - if interval < ratioExpMul { - precomputeExpTableChunk(w, 1, table[1:]) - return - } - - // we parallelize - var wg sync.WaitGroup - for i := 1; i < n; i += interval { - start := i - end := i + interval - if end > n { - end = n - } - wg.Add(1) - go func() { - precomputeExpTableChunk(w, uint64(start), table[start:end]) - wg.Done() - }() - } - wg.Wait() -} - -func precomputeExpTableChunk(w fr.Element, power uint64, table []fr.Element) { - - // this condition ensures that creating a domain of size 1 with cosets don't fail - if len(table) > 0 { - table[0].Exp(w, new(big.Int).SetUint64(power)) - for i := 1; i < len(table); i++ { - table[i].Mul(&table[i-1], &w) - } - } -} - -// WriteTo writes a binary representation of the domain (without the precomputed twiddle factors) -// to the provided writer -func (d *Domain) WriteTo(w io.Writer) (int64, error) { - - enc := curve.NewEncoder(w) - - toEncode := []interface{}{d.Cardinality, &d.CardinalityInv, &d.Generator, &d.GeneratorInv, &d.FrMultiplicativeGen, &d.FrMultiplicativeGenInv} - - for _, v := range toEncode { - if err := enc.Encode(v); err != nil { - return enc.BytesWritten(), err - } - } - - return enc.BytesWritten(), nil -} - -// ReadFrom attempts to decode a domain from Reader -func (d *Domain) ReadFrom(r io.Reader) (int64, error) { - - dec := curve.NewDecoder(r) - - toDecode := []interface{}{&d.Cardinality, &d.CardinalityInv, &d.Generator, &d.GeneratorInv, &d.FrMultiplicativeGen, &d.FrMultiplicativeGenInv} - - for _, v := range toDecode { - if err := dec.Decode(v); err != nil { - return dec.BytesRead(), err - } - } - - // twiddle factors - d.preComputeTwiddles() - - // store 
the bit reversed coset tables if needed - d.reverseCosetTables() - - return dec.BytesRead(), nil -} diff --git a/tools/gnark/bn254/fr/fft/domain_test.go b/tools/gnark/bn254/fr/fft/domain_test.go deleted file mode 100644 index 14d23dd9..00000000 --- a/tools/gnark/bn254/fr/fft/domain_test.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright 2020 ConsenSys Software Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by consensys/gnark-crypto DO NOT EDIT - -package fft - -import ( - "bytes" - "reflect" - "testing" -) - -func TestDomainSerialization(t *testing.T) { - - domain := NewDomain(1 << 6) - var reconstructed Domain - - var buf bytes.Buffer - written, err := domain.WriteTo(&buf) - if err != nil { - t.Fatal(err) - } - var read int64 - read, err = reconstructed.ReadFrom(&buf) - if err != nil { - t.Fatal(err) - } - - if written != read { - t.Fatal("didn't read as many bytes as we wrote") - } - if !reflect.DeepEqual(domain, &reconstructed) { - t.Fatal("Domain.SetBytes(Bytes()) failed") - } -} diff --git a/tools/gnark/bn254/fr/fft/fft.go b/tools/gnark/bn254/fr/fft/fft.go deleted file mode 100644 index 4d4557dc..00000000 --- a/tools/gnark/bn254/fr/fft/fft.go +++ /dev/null @@ -1,299 +0,0 @@ -// Copyright 2020 ConsenSys Software Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by consensys/gnark-crypto DO NOT EDIT - -package fft - -import ( - "math/bits" - "runtime" - - "github.com/consensys/gnark-crypto/ecc" - "github.com/consensys/gnark-crypto/internal/parallel" - - "github.com/consensys/gnark-crypto/ecc/bn254/fr" -) - -// Decimation is used in the FFT call to select decimation in time or in frequency -type Decimation uint8 - -const ( - DIT Decimation = iota - DIF -) - -// parallelize threshold for a single butterfly op, if the fft stage is not parallelized already -const butterflyThreshold = 16 - -// FFT computes (recursively) the discrete Fourier transform of a and stores the result in a -// if decimation == DIT (decimation in time), the input must be in bit-reversed order -// if decimation == DIF (decimation in frequency), the output will be in bit-reversed order -// if coset if set, the FFT(a) returns the evaluation of a on a coset. 
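A usage sketch to go with the FFT/FFTInverse doc comments here. The call shapes and import paths follow the vendored copy shown in this diff (NewDomain(m uint64), FFT/FFTInverse with a variadic coset bool); upstream gnark-crypto releases may expose the coset option differently, so treat them as an assumption.

package main

import (
	"fmt"

	"github.com/consensys/gnark-crypto/ecc/bn254/fr"
	"github.com/consensys/gnark-crypto/ecc/bn254/fr/fft"
)

func main() {
	// coefficients of 1 + X + X² + X³, natural order
	a := make([]fr.Element, 4)
	for i := range a {
		a[i].SetOne()
	}

	domain := fft.NewDomain(uint64(len(a)))

	// DIF FFT with coset=true: evaluations of the polynomial on the coset
	// FrMultiplicativeGen·<Generator>, produced in bit-reversed order.
	domain.FFT(a, fft.DIF, true)
	fft.BitReverse(a) // now a[i] = P(u·gⁱ) in natural order

	// Round trip back to coefficients: FFTInverse with DIF takes natural-order
	// input and returns bit-reversed output, so bit-reverse once more.
	domain.FFTInverse(a, fft.DIF, true)
	fft.BitReverse(a)

	fmt.Println(a[0].String(), a[1].String(), a[2].String(), a[3].String()) // 1 1 1 1
}

This FFTInverse-with-DIF followed by BitReverse pattern is the same one the plookup prover above uses to move between evaluation and coefficient form.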
-func (domain *Domain) FFT(a []fr.Element, decimation Decimation, coset ...bool) { - - numCPU := uint64(runtime.NumCPU()) - - _coset := false - if len(coset) > 0 { - _coset = coset[0] - } - - // if coset != 0, scale by coset table - if _coset { - scale := func(cosetTable []fr.Element) { - parallel.Execute(len(a), func(start, end int) { - for i := start; i < end; i++ { - a[i].Mul(&a[i], &cosetTable[i]) - } - }) - } - if decimation == DIT { - scale(domain.CosetTableReversed) - - } else { - scale(domain.CosetTable) - } - } - - // find the stage where we should stop spawning go routines in our recursive calls - // (ie when we have as many go routines running as we have available CPUs) - maxSplits := bits.TrailingZeros64(ecc.NextPowerOfTwo(numCPU)) - if numCPU <= 1 { - maxSplits = -1 - } - - switch decimation { - case DIF: - difFFT(a, domain.Twiddles, 0, maxSplits, nil) - case DIT: - ditFFT(a, domain.Twiddles, 0, maxSplits, nil) - default: - panic("not implemented") - } -} - -// FFTInverse computes (recursively) the inverse discrete Fourier transform of a and stores the result in a -// if decimation == DIT (decimation in time), the input must be in bit-reversed order -// if decimation == DIF (decimation in frequency), the output will be in bit-reversed order -// coset sets the shift of the fft (0 = no shift, standard fft) -// len(a) must be a power of 2, and w must be a len(a)th root of unity in field F. -func (domain *Domain) FFTInverse(a []fr.Element, decimation Decimation, coset ...bool) { - - numCPU := uint64(runtime.NumCPU()) - - _coset := false - if len(coset) > 0 { - _coset = coset[0] - } - - // find the stage where we should stop spawning go routines in our recursive calls - // (ie when we have as many go routines running as we have available CPUs) - maxSplits := bits.TrailingZeros64(ecc.NextPowerOfTwo(numCPU)) - if numCPU <= 1 { - maxSplits = -1 - } - switch decimation { - case DIF: - difFFT(a, domain.TwiddlesInv, 0, maxSplits, nil) - case DIT: - ditFFT(a, domain.TwiddlesInv, 0, maxSplits, nil) - default: - panic("not implemented") - } - - // scale by CardinalityInv - if !_coset { - parallel.Execute(len(a), func(start, end int) { - for i := start; i < end; i++ { - a[i].Mul(&a[i], &domain.CardinalityInv) - } - }) - return - } - - scale := func(cosetTable []fr.Element) { - parallel.Execute(len(a), func(start, end int) { - for i := start; i < end; i++ { - a[i].Mul(&a[i], &cosetTable[i]). 
- Mul(&a[i], &domain.CardinalityInv) - } - }) - } - if decimation == DIT { - scale(domain.CosetTableInv) - return - } - - // decimation == DIF - scale(domain.CosetTableInvReversed) - -} - -func difFFT(a []fr.Element, twiddles [][]fr.Element, stage, maxSplits int, chDone chan struct{}) { - if chDone != nil { - defer close(chDone) - } - - n := len(a) - if n == 1 { - return - } else if n == 8 { - kerDIF8(a, twiddles, stage) - return - } - m := n >> 1 - - // if stage < maxSplits, we parallelize this butterfly - // but we have only numCPU / stage cpus available - if (m > butterflyThreshold) && (stage < maxSplits) { - // 1 << stage == estimated used CPUs - numCPU := runtime.NumCPU() / (1 << (stage)) - parallel.Execute(m, func(start, end int) { - for i := start; i < end; i++ { - fr.Butterfly(&a[i], &a[i+m]) - a[i+m].Mul(&a[i+m], &twiddles[stage][i]) - } - }, numCPU) - } else { - // i == 0 - fr.Butterfly(&a[0], &a[m]) - for i := 1; i < m; i++ { - fr.Butterfly(&a[i], &a[i+m]) - a[i+m].Mul(&a[i+m], &twiddles[stage][i]) - } - } - - if m == 1 { - return - } - - nextStage := stage + 1 - if stage < maxSplits { - chDone := make(chan struct{}, 1) - go difFFT(a[m:n], twiddles, nextStage, maxSplits, chDone) - difFFT(a[0:m], twiddles, nextStage, maxSplits, nil) - <-chDone - } else { - difFFT(a[0:m], twiddles, nextStage, maxSplits, nil) - difFFT(a[m:n], twiddles, nextStage, maxSplits, nil) - } - -} - -func ditFFT(a []fr.Element, twiddles [][]fr.Element, stage, maxSplits int, chDone chan struct{}) { - if chDone != nil { - defer close(chDone) - } - n := len(a) - if n == 1 { - return - } else if n == 8 { - kerDIT8(a, twiddles, stage) - return - } - m := n >> 1 - - nextStage := stage + 1 - - if stage < maxSplits { - // that's the only time we fire go routines - chDone := make(chan struct{}, 1) - go ditFFT(a[m:], twiddles, nextStage, maxSplits, chDone) - ditFFT(a[0:m], twiddles, nextStage, maxSplits, nil) - <-chDone - } else { - ditFFT(a[0:m], twiddles, nextStage, maxSplits, nil) - ditFFT(a[m:n], twiddles, nextStage, maxSplits, nil) - - } - - // if stage < maxSplits, we parallelize this butterfly - // but we have only numCPU / stage cpus available - if (m > butterflyThreshold) && (stage < maxSplits) { - // 1 << stage == estimated used CPUs - numCPU := runtime.NumCPU() / (1 << (stage)) - parallel.Execute(m, func(start, end int) { - for k := start; k < end; k++ { - a[k+m].Mul(&a[k+m], &twiddles[stage][k]) - fr.Butterfly(&a[k], &a[k+m]) - } - }, numCPU) - - } else { - fr.Butterfly(&a[0], &a[m]) - for k := 1; k < m; k++ { - a[k+m].Mul(&a[k+m], &twiddles[stage][k]) - fr.Butterfly(&a[k], &a[k+m]) - } - } -} - -// BitReverse applies the bit-reversal permutation to a. 
-// len(a) must be a power of 2 (as in every single function in this file) -func BitReverse(a []fr.Element) { - n := uint64(len(a)) - nn := uint64(64 - bits.TrailingZeros64(n)) - - for i := uint64(0); i < n; i++ { - irev := bits.Reverse64(i) >> nn - if irev > i { - a[i], a[irev] = a[irev], a[i] - } - } -} - -// kerDIT8 is a kernel that process a FFT of size 8 -func kerDIT8(a []fr.Element, twiddles [][]fr.Element, stage int) { - - fr.Butterfly(&a[0], &a[1]) - fr.Butterfly(&a[2], &a[3]) - fr.Butterfly(&a[4], &a[5]) - fr.Butterfly(&a[6], &a[7]) - fr.Butterfly(&a[0], &a[2]) - a[3].Mul(&a[3], &twiddles[stage+1][1]) - fr.Butterfly(&a[1], &a[3]) - fr.Butterfly(&a[4], &a[6]) - a[7].Mul(&a[7], &twiddles[stage+1][1]) - fr.Butterfly(&a[5], &a[7]) - fr.Butterfly(&a[0], &a[4]) - a[5].Mul(&a[5], &twiddles[stage+0][1]) - fr.Butterfly(&a[1], &a[5]) - a[6].Mul(&a[6], &twiddles[stage+0][2]) - fr.Butterfly(&a[2], &a[6]) - a[7].Mul(&a[7], &twiddles[stage+0][3]) - fr.Butterfly(&a[3], &a[7]) -} - -// kerDIF8 is a kernel that process a FFT of size 8 -func kerDIF8(a []fr.Element, twiddles [][]fr.Element, stage int) { - - fr.Butterfly(&a[0], &a[4]) - fr.Butterfly(&a[1], &a[5]) - fr.Butterfly(&a[2], &a[6]) - fr.Butterfly(&a[3], &a[7]) - a[5].Mul(&a[5], &twiddles[stage+0][1]) - a[6].Mul(&a[6], &twiddles[stage+0][2]) - a[7].Mul(&a[7], &twiddles[stage+0][3]) - fr.Butterfly(&a[0], &a[2]) - fr.Butterfly(&a[1], &a[3]) - fr.Butterfly(&a[4], &a[6]) - fr.Butterfly(&a[5], &a[7]) - a[3].Mul(&a[3], &twiddles[stage+1][1]) - a[7].Mul(&a[7], &twiddles[stage+1][1]) - fr.Butterfly(&a[0], &a[1]) - fr.Butterfly(&a[2], &a[3]) - fr.Butterfly(&a[4], &a[5]) - fr.Butterfly(&a[6], &a[7]) -} diff --git a/tools/gnark/bn254/fr/fft/fft_test.go b/tools/gnark/bn254/fr/fft/fft_test.go deleted file mode 100644 index 59e3db45..00000000 --- a/tools/gnark/bn254/fr/fft/fft_test.go +++ /dev/null @@ -1,326 +0,0 @@ -// Copyright 2020 ConsenSys Software Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
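Aside (not part of the diff): the DIF/DIT ordering conventions documented in the deleted fft.go are easy to get wrong, so here is a minimal, hedged usage sketch. It assumes a gnark-crypto release whose fft API still matches the signatures deleted above (newer releases replaced the variadic coset flag with functional options); with the vendored copy removed here, the import path would instead point at tools/gnark/bn254/fr/fft.

package main

import (
	"fmt"

	"github.com/consensys/gnark-crypto/ecc/bn254/fr"
	"github.com/consensys/gnark-crypto/ecc/bn254/fr/fft" // assumption: a version exposing FFT(a, decimation, coset ...bool)
)

func main() {
	const size = 1 << 4
	domain := fft.NewDomain(size)

	a := make([]fr.Element, size)
	backup := make([]fr.Element, size)
	for i := range a {
		a[i].SetRandom()
	}
	copy(backup, a)

	// Mirrors the deleted "DIT FFT(DIF FFT)==id" property: an inverse pass in
	// DIF order emits bit-reversed output, which is exactly the input order a
	// forward DIT pass expects, so the two compose to the identity.
	domain.FFTInverse(a, fft.DIF, false)
	domain.FFT(a, fft.DIT, false)

	ok := true
	for i := range a {
		ok = ok && a[i].Equal(&backup[i])
	}
	// To get evaluations over the domain in natural order instead, run
	// domain.FFT(a, fft.DIF, false) followed by fft.BitReverse(a).
	fmt.Println("FFT round trip is identity:", ok)
}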
- -// Code generated by consensys/gnark-crypto DO NOT EDIT - -package fft - -import ( - "math/big" - "strconv" - "testing" - - "github.com/consensys/gnark-crypto/ecc/bn254/fr" - - "github.com/leanovate/gopter" - "github.com/leanovate/gopter/gen" - "github.com/leanovate/gopter/prop" -) - -func TestFFT(t *testing.T) { - const maxSize = 1 << 10 - - nbCosets := 3 - domainWithPrecompute := NewDomain(maxSize) - - parameters := gopter.DefaultTestParameters() - parameters.MinSuccessfulTests = 5 - - properties := gopter.NewProperties(parameters) - - properties.Property("DIF FFT should be consistent with dual basis", prop.ForAll( - - // checks that a random evaluation of a dual function eval(gen**ithpower) is consistent with the FFT result - func(ithpower int) bool { - - pol := make([]fr.Element, maxSize) - backupPol := make([]fr.Element, maxSize) - - for i := 0; i < maxSize; i++ { - pol[i].SetRandom() - } - copy(backupPol, pol) - - domainWithPrecompute.FFT(pol, DIF, false) - BitReverse(pol) - - sample := domainWithPrecompute.Generator - sample.Exp(sample, big.NewInt(int64(ithpower))) - - eval := evaluatePolynomial(backupPol, sample) - - return eval.Equal(&pol[ithpower]) - - }, - gen.IntRange(0, maxSize-1), - )) - - properties.Property("DIF FFT on cosets should be consistent with dual basis", prop.ForAll( - - // checks that a random evaluation of a dual function eval(gen**ithpower) is consistent with the FFT result - func(ithpower int) bool { - - pol := make([]fr.Element, maxSize) - backupPol := make([]fr.Element, maxSize) - - for i := 0; i < maxSize; i++ { - pol[i].SetRandom() - } - copy(backupPol, pol) - - domainWithPrecompute.FFT(pol, DIF, true) - BitReverse(pol) - - sample := domainWithPrecompute.Generator - sample.Exp(sample, big.NewInt(int64(ithpower))). 
- Mul(&sample, &domainWithPrecompute.FrMultiplicativeGen) - - eval := evaluatePolynomial(backupPol, sample) - - return eval.Equal(&pol[ithpower]) - - }, - gen.IntRange(0, maxSize-1), - )) - - properties.Property("DIT FFT should be consistent with dual basis", prop.ForAll( - - // checks that a random evaluation of a dual function eval(gen**ithpower) is consistent with the FFT result - func(ithpower int) bool { - - pol := make([]fr.Element, maxSize) - backupPol := make([]fr.Element, maxSize) - - for i := 0; i < maxSize; i++ { - pol[i].SetRandom() - } - copy(backupPol, pol) - - BitReverse(pol) - domainWithPrecompute.FFT(pol, DIT, false) - - sample := domainWithPrecompute.Generator - sample.Exp(sample, big.NewInt(int64(ithpower))) - - eval := evaluatePolynomial(backupPol, sample) - - return eval.Equal(&pol[ithpower]) - - }, - gen.IntRange(0, maxSize-1), - )) - - properties.Property("bitReverse(DIF FFT(DIT FFT (bitReverse))))==id", prop.ForAll( - - func() bool { - - pol := make([]fr.Element, maxSize) - backupPol := make([]fr.Element, maxSize) - - for i := 0; i < maxSize; i++ { - pol[i].SetRandom() - } - copy(backupPol, pol) - - BitReverse(pol) - domainWithPrecompute.FFT(pol, DIT, false) - domainWithPrecompute.FFTInverse(pol, DIF, false) - BitReverse(pol) - - check := true - for i := 0; i < len(pol); i++ { - check = check && pol[i].Equal(&backupPol[i]) - } - return check - }, - )) - - properties.Property("bitReverse(DIF FFT(DIT FFT (bitReverse))))==id on cosets", prop.ForAll( - - func() bool { - - pol := make([]fr.Element, maxSize) - backupPol := make([]fr.Element, maxSize) - - for i := 0; i < maxSize; i++ { - pol[i].SetRandom() - } - copy(backupPol, pol) - - check := true - - for i := 1; i <= nbCosets; i++ { - - BitReverse(pol) - domainWithPrecompute.FFT(pol, DIT, true) - domainWithPrecompute.FFTInverse(pol, DIF, true) - BitReverse(pol) - - for i := 0; i < len(pol); i++ { - check = check && pol[i].Equal(&backupPol[i]) - } - } - - return check - }, - )) - - properties.Property("DIT FFT(DIF FFT)==id", prop.ForAll( - - func() bool { - - pol := make([]fr.Element, maxSize) - backupPol := make([]fr.Element, maxSize) - - for i := 0; i < maxSize; i++ { - pol[i].SetRandom() - } - copy(backupPol, pol) - - domainWithPrecompute.FFTInverse(pol, DIF, false) - domainWithPrecompute.FFT(pol, DIT, false) - - check := true - for i := 0; i < len(pol); i++ { - check = check && (pol[i] == backupPol[i]) - } - return check - }, - )) - - properties.Property("DIT FFT(DIF FFT)==id on cosets", prop.ForAll( - - func() bool { - - pol := make([]fr.Element, maxSize) - backupPol := make([]fr.Element, maxSize) - - for i := 0; i < maxSize; i++ { - pol[i].SetRandom() - } - copy(backupPol, pol) - - domainWithPrecompute.FFTInverse(pol, DIF, true) - domainWithPrecompute.FFT(pol, DIT, true) - - check := true - for i := 0; i < len(pol); i++ { - check = check && (pol[i] == backupPol[i]) - } - return check - }, - )) - - properties.TestingRun(t, gopter.ConsoleReporter(false)) - -} - -// -------------------------------------------------------------------- -// benches -func BenchmarkBitReverse(b *testing.B) { - - const maxSize = 1 << 20 - - pol := make([]fr.Element, maxSize) - pol[0].SetRandom() - for i := 1; i < maxSize; i++ { - pol[i] = pol[i-1] - } - - for i := 8; i < 20; i++ { - b.Run("bit reversing 2**"+strconv.Itoa(i)+"bits", func(b *testing.B) { - b.ResetTimer() - for j := 0; j < b.N; j++ { - BitReverse(pol[:1<x², on a - // power of 2 subgroup of Fr^{*}. 
- RADIX_2_FRI IOPP = iota -) - -// round contains the data corresponding to a single round -// of fri. -// It consists of a list of Interactions between the prover and the verifier, -// where each interaction contains a challenge provided by the verifier, as -// well as MerkleProofs for the queries of the verifier. The Merkle proofs -// correspond to the openings of the i-th folded polynomial at 2 points that -// belong to the same fiber of x -> x². -type Round struct { - - // stores the Interactions between the prover and the verifier. - // Each interaction results in a set or merkle proofs, corresponding - // to the queries of the verifier. - Interactions [][2]MerkleProof - - // evaluation stores the evaluation of the fully folded polynomial. - // The fully folded polynomial is constant, and is evaluated on a - // a set of size \rho. Since the polynomial is supposed to be constant, - // only one evaluation, corresponding to the polynomial, is given. Since - // the prover cannot know in advance which entry the verifier will query, - // providing a single evaluation - Evaluation fr.Element -} - -// ProofOfProximity proof of proximity, attesting that -// a function is d-close to a low degree polynomial. -// -// It is composed of a series of Interactions, emulated with Fiat Shamir, -type ProofOfProximity struct { - - // ID unique ID attached to the proof of proximity. It's needed for - // protocols using Fiat Shamir for instance, where challenges are derived - // from the proof of proximity. - ID []byte - - // round contains the data corresponding to a single round - // of fri. There are nbRounds rounds of Interactions. - Rounds []Round -} - -// Iopp interface that an iopp should implement -type Iopp interface { - - // BuildProofOfProximity creates a proof of proximity that p is d-close to a polynomial - // of degree len(p). The proof is built non interactively using Fiat Shamir. - BuildProofOfProximity(p []fr.Element) (ProofOfProximity, error) - - // VerifyProofOfProximity verifies the proof of proximity. It returns an error if the - // verification fails. - VerifyProofOfProximity(proof ProofOfProximity) error - - // Opens a polynomial at gⁱ where i = position. - Open(p []fr.Element, position uint64) (OpeningProof, error) - - // Verifies the opening of a polynomial at gⁱ where i = position. - VerifyOpening(position uint64, openingProof OpeningProof, pp ProofOfProximity) error -} - -// GetRho returns the factor ρ = size_code_word/size_polynomial -func GetRho() int { - return rho -} - -func init() { - twoInv.SetUint64(2).Inverse(&twoInv) -} - -// New creates a new IOPP capable to handle degree(size) polynomials. -func (iopp IOPP) New(size uint64, h hash.Hash) Iopp { - switch iopp { - case RADIX_2_FRI: - return newRadixTwoFri(size, h) - default: - panic("iopp name is not recognized") - } -} - -// radixTwoFri empty structs implementing compressionFunction for -// the squaring function. -type radixTwoFri struct { - - // hash function that is used for Fiat Shamir and for committing to - // the oracles. - h hash.Hash - - // nbSteps number of Interactions between the prover and the verifier - nbSteps int - - // domain used to build the Reed Solomon code from the given polynomial. - // The size of the domain is ρ*size_polynomial. 
- domain *fft.Domain -} - -func newRadixTwoFri(size uint64, h hash.Hash) radixTwoFri { - - var res radixTwoFri - - // computing the number of steps - n := ecc.NextPowerOfTwo(size) - nbSteps := bits.TrailingZeros(uint(n)) - res.nbSteps = nbSteps - - // extending the domain - n = n * rho - - // building the domains - res.domain = fft.NewDomain(n) - - // hash function - res.h = h - - return res -} - -// convertCanonicalSorted convert the index i, an entry in a -// sorted polynomial, to the corresponding entry in canonical -// representation. n is the size of the polynomial. -func convertCanonicalSorted(i, n int) int { - - if i < n/2 { - return 2 * i - } else { - l := n - (i + 1) - l = 2 * l - return n - l - 1 - } - -} - -// deriveQueriesPositions derives the indices of the oracle -// function that the verifier has to pick, in sorted form. -// * pos is the initial position, i.e. the logarithm of the first challenge -// * size is the size of the initial polynomial -// * The result is a slice of []int, where each entry is a tuple (iₖ), such that -// the verifier needs to evaluate ∑ₖ oracle(iₖ)xᵏ to build -// the folded function. -func (s radixTwoFri) deriveQueriesPositions(pos int, size int) []int { - - _s := size / 2 - res := make([]int, s.nbSteps) - res[0] = pos - for i := 1; i < s.nbSteps; i++ { - t := (res[i-1] - (res[i-1] % 2)) / 2 - res[i] = convertCanonicalSorted(t, _s) - _s = _s / 2 - } - - return res -} - -// sort orders the evaluation of a polynomial on a domain -// such that contiguous entries are in the same fiber: -// {q(g⁰), q(g^{n/2}), q(g¹), q(g^{1+n/2}),...,q(g^{n/2-1}), q(gⁿ⁻¹)} -func sort(evaluations []fr.Element) []fr.Element { - q := make([]fr.Element, len(evaluations)) - n := len(evaluations) / 2 - for i := 0; i < n; i++ { - q[2*i].Set(&evaluations[i]) - q[2*i+1].Set(&evaluations[i+n]) - } - return q -} - -// Opens a polynomial at gⁱ where i = position. -func (s radixTwoFri) Open(p []fr.Element, position uint64) (OpeningProof, error) { - - // check that position is in the correct range - if position >= s.domain.Cardinality { - return OpeningProof{}, ErrRangePosition - } - - // put q in evaluation form - q := make([]fr.Element, s.domain.Cardinality) - copy(q, p) - s.domain.FFT(q, fft.DIF) - fft.BitReverse(q) - - // sort q to have fibers in contiguous entries. The goal is to have one - // Merkle path for both openings of entries which are in the same fiber. - q = sort(q) - - // build the Merkle proof, we the position is converted to fit the sorted polynomial - pos := convertCanonicalSorted(int(position), len(q)) - - tree := merkletree.New(s.h) - err := tree.SetIndex(uint64(pos)) - if err != nil { - return OpeningProof{}, err - } - for i := 0; i < len(q); i++ { - tree.Push(q[i].Marshal()) - } - var res OpeningProof - res.merkleRoot, res.ProofSet, res.index, res.numLeaves = tree.Prove() - - // set the claimed value, which is the first entry of the Merkle proof - res.ClaimedValue.SetBytes(res.ProofSet[0]) - - return res, nil -} - -// Verifies the opening of a polynomial. -// * position the point at which the proof is opened (the point is gⁱ where i = position) -// * openingProof Merkle path proof -// * pp proof of proximity, needed because before opening Merkle path proof one should be sure that the -// committed values come from a polynomial. During the verification of the Merkle path proof, the root -// hash of the Merkle path is compared to the root hash of the first interaction of the proof of proximity, -// those should be equal, if not an error is raised. 
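Aside (not part of the diff): the canonical-to-sorted index mapping used by sort and Open above is easier to see on a tiny domain. The sketch below re-implements convertCanonicalSorted, equivalently to the deleted helper, purely to print the permutation for n = 8; it is illustrative only.

package main

import "fmt"

// Same mapping as the deleted helper: entry i of the natural-order evaluation
// vector [q(g⁰), q(g¹), ..., q(gⁿ⁻¹)] lands at this index in the sorted layout
// [q(g⁰), q(g^{n/2}), q(g¹), q(g^{1+n/2}), ...], where each pair of adjacent
// entries forms one fiber of x -> x².
func convertCanonicalSorted(i, n int) int {
	if i < n/2 {
		return 2 * i
	}
	l := 2 * (n - (i + 1))
	return n - l - 1
}

func main() {
	const n = 8
	for i := 0; i < n; i++ {
		fmt.Printf("q(g^%d) -> sorted index %d\n", i, convertCanonicalSorted(i, n))
	}
	// Prints 0->0, 1->2, 2->4, 3->6, 4->1, 5->3, 6->5, 7->7: q(gⁱ) and
	// q(g^{i+4}) = q(-gⁱ) end up next to each other, sharing one Merkle path.
}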
-func (s radixTwoFri) VerifyOpening(position uint64, openingProof OpeningProof, pp ProofOfProximity) error { - - // To query the Merkle path, we look at the first series of Interactions, and check whether it's the point - // at 'position' or its neighbor that contains the full Merkle path. - var fullMerkleProof int - if len(pp.Rounds[0].Interactions[0][0].ProofSet) > len(pp.Rounds[0].Interactions[0][1].ProofSet) { - fullMerkleProof = 0 - } else { - fullMerkleProof = 1 - } - - // check that the merkle roots coincide - if !bytes.Equal(openingProof.merkleRoot, pp.Rounds[0].Interactions[0][fullMerkleProof].MerkleRoot) { - return ErrMerkleRoot - } - - // convert position to the sorted version - sizePoly := s.domain.Cardinality - pos := convertCanonicalSorted(int(position), int(sizePoly)) - - // check the Merkle proof - res := merkletree.VerifyProof(s.h, openingProof.merkleRoot, openingProof.ProofSet, uint64(pos), openingProof.numLeaves) - if !res { - return ErrMerklePath - } - return nil - -} - -// foldPolynomialLagrangeBasis folds a polynomial p, expressed in Lagrange basis. -// -// Fᵣ[X]/(Xⁿ-1) is a free module of rank 2 on Fᵣ[Y]/(Y^{n/2}-1). If -// p∈ Fᵣ[X]/(Xⁿ-1), expressed in Lagrange basis, the function finds the coordinates -// p₁, p₂ of p in Fᵣ[Y]/(Y^{n/2}-1), expressed in Lagrange basis. Finally, it computes -// p₁ + x*p₂ and returns it. -// -// * p is the polynomial to fold, in Lagrange basis, sorted like this: p = [p(1),p(-1),p(g),p(-g),p(g²),p(-g²),...] -// * g is a generator of the subgroup of Fᵣ^{*} of size len(p) -// * x is the folding challenge x, used to return p₁+x*p₂ -func foldPolynomialLagrangeBasis(pSorted []fr.Element, gInv, x fr.Element) []fr.Element { - - // we have the following system - // p₁(g²ⁱ)+gⁱp₂(g²ⁱ) = p(gⁱ) - // p₁(g²ⁱ)-gⁱp₂(g²ⁱ) = p(-gⁱ) - // we solve the system for p₁(g²ⁱ),p₂(g²ⁱ) - s := len(pSorted) - res := make([]fr.Element, s/2) - - var p1, p2, acc fr.Element - acc.SetOne() - - for i := 0; i < s/2; i++ { - - p1.Add(&pSorted[2*i], &pSorted[2*i+1]) - p2.Sub(&pSorted[2*i], &pSorted[2*i+1]).Mul(&p2, &acc) - res[i].Mul(&p2, &x).Add(&res[i], &p1).Mul(&res[i], &twoInv) - - acc.Mul(&acc, &gInv) - - } - - return res -} - -// paddNaming takes s = 0xA1.... and turns -// it into s' = 0xA1.. || 0..0 of size frSize bytes. -// Using this, when writing the domain separator in FiatShamir, it takes -// the same size as a snark variable (=number of byte in the block of a snark compliant -// hash function like mimc), so it is compliant with snark circuit. -func paddNaming(s string, size int) string { - a := make([]byte, size) - b := []byte(s) - copy(a, b) - return string(a) -} - -// buildProofOfProximitySingleRound generates a proof that a function, given as an oracle from -// the verifier point of view, is in fact δ-close to a polynomial. -// * salt is a variable for multi rounds, it allows to generate different challenges using Fiat Shamir -// * p is in evaluation form -func (s radixTwoFri) buildProofOfProximitySingleRound(salt fr.Element, p []fr.Element) (Round, error) { - - // the proof will contain nbSteps Interactions - var res Round - res.Interactions = make([][2]MerkleProof, s.nbSteps) - - // Fiat Shamir transcript to derive the challenges. The xᵢ are used to fold the - // polynomials. - // During the i-th round, the prover has a polynomial P of degree n. The verifier sends - // xᵢ∈ Fᵣ to the prover. The prover expresses F in Fᵣ[X,Y]/ as - // P₀(Y)+X P₁(Y) where P₀, P₁ are of degree n/2, and he then folds the polynomial - // by replacing x by xᵢ. 
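Aside (not part of the diff): the fold performed by foldPolynomialLagrangeBasis above, and re-derived by the verifier later in this file, is worth writing out once. Writing p(X) = p₁(X²) + X·p₂(X²), the two evaluations in the fiber of g²ⁱ give the 2×2 system quoted in the code:

    p(gⁱ)  = p₁(g²ⁱ) + gⁱ·p₂(g²ⁱ)
    p(−gⁱ) = p₁(g²ⁱ) − gⁱ·p₂(g²ⁱ)

hence

    p₁(g²ⁱ) = (p(gⁱ) + p(−gⁱ)) / 2
    p₂(g²ⁱ) = g⁻ⁱ·(p(gⁱ) − p(−gⁱ)) / 2

and the folded value stored at index i is p₁(g²ⁱ) + xᵢ·p₂(g²ⁱ). That is exactly what the loop computes: p1 = pSorted[2i] + pSorted[2i+1], p2 = (pSorted[2i] − pSorted[2i+1])·g⁻ⁱ (acc tracks g⁻ⁱ), and res[i] = (p1 + x·p2)·2⁻¹, the single multiplication by twoInv supplying the factor 1/2 in both formulas at once.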
- xis := make([]string, s.nbSteps+1) - for i := 0; i < s.nbSteps; i++ { - xis[i] = paddNaming(fmt.Sprintf("x%d", i), fr.Bytes) - } - xis[s.nbSteps] = paddNaming("s0", fr.Bytes) - fs := fiatshamir.NewTranscript(s.h, xis...) - - // the salt is binded to the first challenge, to ensure the challenges - // are different at each round. - err := fs.Bind(xis[0], salt.Marshal()) - if err != nil { - return Round{}, err - } - - // step 1 : fold the polynomial using the xi - - // evalsAtRound stores the list of the nbSteps polynomial evaluations, each evaluation - // corresponds to the evaluation o the folded polynomial at round i. - evalsAtRound := make([][]fr.Element, s.nbSteps) - - // evaluate p and sort the result - _p := make([]fr.Element, s.domain.Cardinality) - copy(_p, p) - - // gInv inverse of the generator of the cyclic group of size the size of the polynomial. - // The size of the cyclic group is ρ*s.domainSize, and not s.domainSize. - var gInv fr.Element - gInv.Set(&s.domain.GeneratorInv) - - for i := 0; i < s.nbSteps; i++ { - - evalsAtRound[i] = sort(_p) - - // compute the root hash, needed to derive xi - t := merkletree.New(s.h) - for k := 0; k < len(_p); k++ { - t.Push(evalsAtRound[i][k].Marshal()) - } - rh := t.Root() - err := fs.Bind(xis[i], rh) - if err != nil { - return res, err - } - - // derive the challenge - bxi, err := fs.ComputeChallenge(xis[i]) - if err != nil { - return res, err - } - var xi fr.Element - xi.SetBytes(bxi) - - // fold _p, reusing its memory - _p = foldPolynomialLagrangeBasis(evalsAtRound[i], gInv, xi) - - // g <- g² - gInv.Square(&gInv) - - } - - // last round, provide the evaluation. The fully folded polynomial is of size rho. It should - // correspond to the evaluation of a polynomial of degree 1 on ρ points, so those points - // are supposed to be on a line. - res.Evaluation.Set(&_p[0]) - - // step 2: provide the Merkle proofs of the queries - - // derive the verifier queries - err = fs.Bind(xis[s.nbSteps], res.Evaluation.Marshal()) - if err != nil { - return res, err - } - binSeed, err := fs.ComputeChallenge(xis[s.nbSteps]) - if err != nil { - return res, err - } - var bPos, bCardinality big.Int - bPos.SetBytes(binSeed) - bCardinality.SetUint64(s.domain.Cardinality) - bPos.Mod(&bPos, &bCardinality) - si := s.deriveQueriesPositions(int(bPos.Uint64()), int(s.domain.Cardinality)) - - for i := 0; i < s.nbSteps; i++ { - - // build proofs of queries at s[i] - t := merkletree.New(s.h) - err := t.SetIndex(uint64(si[i])) - if err != nil { - return res, err - } - for k := 0; k < len(evalsAtRound[i]); k++ { - t.Push(evalsAtRound[i][k].Marshal()) - } - mr, ProofSet, _, numLeaves := t.Prove() - - // c denotes the entry that contains the full Merkle proof. The entry 1-c will - // only contain 2 elements, which are the neighbor point, and the hash of the - // first point. The remaining of the Merkle path is common to both the original - // point and its neighbor. - c := si[i] % 2 - res.Interactions[i][c] = MerkleProof{mr, ProofSet, numLeaves} - res.Interactions[i][1-c] = MerkleProof{ - mr, - make([][]byte, 2), - numLeaves, - } - res.Interactions[i][1-c].ProofSet[0] = evalsAtRound[i][si[i]+1-2*c].Marshal() - s.h.Reset() - _, err = s.h.Write(res.Interactions[i][c].ProofSet[0]) - if err != nil { - return res, err - } - res.Interactions[i][1-c].ProofSet[1] = s.h.Sum(nil) - - } - - return res, nil - -} - -// BuildProofOfProximity generates a proof that a function, given as an oracle from -// the verifier point of view, is in fact δ-close to a polynomial. 
-func (s radixTwoFri) BuildProofOfProximity(p []fr.Element) (ProofOfProximity, error) { - - // the proof will contain nbSteps Interactions - var proof ProofOfProximity - proof.Rounds = make([]Round, nbRounds) - - // evaluate p - // evaluate p and sort the result - _p := make([]fr.Element, s.domain.Cardinality) - copy(_p, p) - s.domain.FFT(_p, fft.DIF) - fft.BitReverse(_p) - - var err error - var salt, one fr.Element - one.SetOne() - for i := 0; i < nbRounds; i++ { - proof.Rounds[i], err = s.buildProofOfProximitySingleRound(salt, _p) - if err != nil { - return proof, err - } - salt.Add(&salt, &one) - } - - return proof, nil -} - -// verifyProofOfProximitySingleRound verifies the proof of proximity. It returns an error if the -// verification fails. -func (s radixTwoFri) verifyProofOfProximitySingleRound(salt fr.Element, proof Round) error { - - // Fiat Shamir transcript to derive the challenges - xis := make([]string, s.nbSteps+1) - for i := 0; i < s.nbSteps; i++ { - xis[i] = paddNaming(fmt.Sprintf("x%d", i), fr.Bytes) - } - xis[s.nbSteps] = paddNaming("s0", fr.Bytes) - fs := fiatshamir.NewTranscript(s.h, xis...) - - xi := make([]fr.Element, s.nbSteps) - - // the salt is binded to the first challenge, to ensure the challenges - // are different at each round. - err := fs.Bind(xis[0], salt.Marshal()) - if err != nil { - return err - } - - for i := 0; i < s.nbSteps; i++ { - err := fs.Bind(xis[i], proof.Interactions[i][0].MerkleRoot) - if err != nil { - return err - } - bxi, err := fs.ComputeChallenge(xis[i]) - if err != nil { - return err - } - xi[i].SetBytes(bxi) - } - - // derive the verifier queries - // for i := 0; i < len(proof.evaluation); i++ { - // err := fs.Bind(xis[s.nbSteps], proof.evaluation[i].Marshal()) - // if err != nil { - // return err - // } - // } - err = fs.Bind(xis[s.nbSteps], proof.Evaluation.Marshal()) - if err != nil { - return err - } - binSeed, err := fs.ComputeChallenge(xis[s.nbSteps]) - if err != nil { - return err - } - var bPos, bCardinality big.Int - bPos.SetBytes(binSeed) - bCardinality.SetUint64(s.domain.Cardinality) - bPos.Mod(&bPos, &bCardinality) - si := s.deriveQueriesPositions(int(bPos.Uint64()), int(s.domain.Cardinality)) - - // for each round check the Merkle proof and the correctness of the folding - - // current size of the polynomial - var accGInv fr.Element - accGInv.Set(&s.domain.GeneratorInv) - for i := 0; i < s.nbSteps; i++ { - - // correctness of Merkle proof - // c is the entry containing the full Merkle proof. - c := si[i] % 2 - res := merkletree.VerifyProof( - s.h, - proof.Interactions[i][c].MerkleRoot, - proof.Interactions[i][c].ProofSet, - uint64(si[i]), - proof.Interactions[i][c].numLeaves, - ) - if !res { - return ErrMerklePath - } - - // we verify the Merkle proof for the neighbor query, to do that we have - // to pick the full Merkle proof of the first entry, stripped off of the leaf and - // the first node. We replace the leaf and the first node by the leaf and the first - // node of the partial Merkle proof, since the leaf and the first node of both proofs - // are the only entries that differ. 
- ProofSet := make([][]byte, len(proof.Interactions[i][c].ProofSet)) - copy(ProofSet[2:], proof.Interactions[i][c].ProofSet[2:]) - ProofSet[0] = proof.Interactions[i][1-c].ProofSet[0] - ProofSet[1] = proof.Interactions[i][1-c].ProofSet[1] - res = merkletree.VerifyProof( - s.h, - proof.Interactions[i][1-c].MerkleRoot, - ProofSet, - uint64(si[i]+1-2*c), - proof.Interactions[i][1-c].numLeaves, - ) - if !res { - return ErrMerklePath - } - - // correctness of the folding - if i < s.nbSteps-1 { - - var fe, fo, l, r, fn fr.Element - - // l = P(gⁱ), r = P(g^{i+n/2}) - l.SetBytes(proof.Interactions[i][0].ProofSet[0]) - r.SetBytes(proof.Interactions[i][1].ProofSet[0]) - - // (g^{si[i]}, g^{si[i]+1}) is the fiber of g^{2*si[i]}. The system to solve - // (for P₀(g^{2si[i]}), P₀(g^{2si[i]}) ) is: - // P(g^{si[i]}) = P₀(g^{2si[i]}) + g^{si[i]/2}*P₀(g^{2si[i]}) - // P(g^{si[i]+1}) = P₀(g^{2si[i]}) - g^{si[i]/2}*P₀(g^{2si[i]}) - bm := big.NewInt(int64(si[i] / 2)) - var ginv fr.Element - ginv.Exp(accGInv, bm) - fe.Add(&l, &r) // P₁(g²ⁱ) (to be multiplied by 2⁻¹) - fo.Sub(&l, &r).Mul(&fo, &ginv) // P₀(g²ⁱ) (to be multiplied by 2⁻¹) - fo.Mul(&fo, &xi[i]).Add(&fo, &fe).Mul(&fo, &twoInv) // P₀(g²ⁱ) + xᵢ * P₁(g²ⁱ) - - fn.SetBytes(proof.Interactions[i+1][si[i+1]%2].ProofSet[0]) - - if !fo.Equal(&fn) { - return ErrProximityTestFolding - } - - // next inverse generator - accGInv.Square(&accGInv) - } - - } - - // last transition - var fe, fo, l, r fr.Element - - l.SetBytes(proof.Interactions[s.nbSteps-1][0].ProofSet[0]) - r.SetBytes(proof.Interactions[s.nbSteps-1][1].ProofSet[0]) - - _si := si[s.nbSteps-1] / 2 - - accGInv.Exp(accGInv, big.NewInt(int64(_si))) - - fe.Add(&l, &r) // P₁(g²ⁱ) (to be multiplied by 2⁻¹) - fo.Sub(&l, &r).Mul(&fo, &accGInv) // P₀(g²ⁱ) (to be multiplied by 2⁻¹) - fo.Mul(&fo, &xi[s.nbSteps-1]).Add(&fo, &fe).Mul(&fo, &twoInv) // P₀(g²ⁱ) + xᵢ * P₁(g²ⁱ) - - // Last step: the final evaluation should be the evaluation of a degree 0 polynomial, - // so it must be constant. - if !fo.Equal(&proof.Evaluation) { - return ErrProximityTestFolding - } - - return nil -} - -// VerifyProofOfProximity verifies the proof, by checking each interaction one -// by one. -func (s radixTwoFri) VerifyProofOfProximity(proof ProofOfProximity) error { - - var salt, one fr.Element - one.SetOne() - for i := 0; i < nbRounds; i++ { - err := s.verifyProofOfProximitySingleRound(salt, proof.Rounds[i]) - if err != nil { - return err - } - salt.Add(&salt, &one) - } - return nil - -} diff --git a/tools/gnark/bn254/fr/fri/fri_test.go b/tools/gnark/bn254/fr/fri/fri_test.go deleted file mode 100644 index 084a4480..00000000 --- a/tools/gnark/bn254/fr/fri/fri_test.go +++ /dev/null @@ -1,240 +0,0 @@ -// Copyright 2020 ConsenSys Software Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
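Aside (not part of the diff): an end-to-end usage sketch of the Iopp API exercised by the test file that follows. It assumes a gnark-crypto release whose fri package still matches the signatures deleted above; with the vendored copy removed here, the import path would instead be tools/gnark/bn254/fr/fri.

package main

import (
	"crypto/sha256"
	"fmt"

	"github.com/consensys/gnark-crypto/ecc/bn254/fr"
	"github.com/consensys/gnark-crypto/ecc/bn254/fr/fri" // assumption: a version matching the deleted API
)

func main() {
	const size = 1 << 6

	// a small polynomial, given in coefficient form
	p := make([]fr.Element, size)
	for i := range p {
		p[i].SetUint64(uint64(i + 1))
	}

	iop := fri.RADIX_2_FRI.New(size, sha256.New())

	// prove proximity to a low-degree polynomial, then verify the proof
	proof, err := iop.BuildProofOfProximity(p)
	if err != nil {
		panic(err)
	}
	if err := iop.VerifyProofOfProximity(proof); err != nil {
		panic(err)
	}

	// open p at g³ (position 3) and check the opening against the same proof
	opening, err := iop.Open(p, 3)
	if err != nil {
		panic(err)
	}
	if err := iop.VerifyOpening(3, opening, proof); err != nil {
		panic(err)
	}

	fmt.Println("proximity and opening proofs verified")
}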
- -// Code generated by consensys/gnark-crypto DO NOT EDIT - -package fri - -import ( - "crypto/sha256" - "fmt" - "math/big" - "testing" - - "github.com/consensys/gnark-crypto/ecc/bn254/fr" - "github.com/leanovate/gopter" - "github.com/leanovate/gopter/gen" - "github.com/leanovate/gopter/prop" -) - -// logFiber returns u, v such that {g^u, g^v} = f⁻¹((g²)^{_p}) -func logFiber(_p, _n int) (_u, _v big.Int) { - if _p%2 == 0 { - _u.SetInt64(int64(_p / 2)) - _v.SetInt64(int64(_p/2 + _n/2)) - } else { - l := (_n - 1 - _p) / 2 - _u.SetInt64(int64(_n - 1 - l)) - _v.SetInt64(int64(_n - 1 - l - _n/2)) - } - return -} - -func randomPolynomial(size uint64, seed int32) []fr.Element { - p := make([]fr.Element, size) - p[0].SetUint64(uint64(seed)) - for i := 1; i < len(p); i++ { - p[i].Square(&p[i-1]) - } - return p -} - -// convertOrderCanonical convert the index i, an entry in a -// sorted polynomial, to the corresponding entry in canonical -// representation. n is the size of the polynomial. -func convertSortedCanonical(i, n int) int { - if i%2 == 0 { - return i / 2 - } else { - l := (n - 1 - i) / 2 - return n - 1 - l - } -} - -func TestFRI(t *testing.T) { - - parameters := gopter.DefaultTestParameters() - parameters.MinSuccessfulTests = 10 - - properties := gopter.NewProperties(parameters) - - size := 4096 - - properties.Property("verifying wrong opening should fail", prop.ForAll( - - func(m int32) bool { - - _s := RADIX_2_FRI.New(uint64(size), sha256.New()) - s := _s.(radixTwoFri) - - p := randomPolynomial(uint64(size), m) - - pos := int64(m % 4096) - pp, _ := s.BuildProofOfProximity(p) - - openingProof, err := s.Open(p, uint64(pos)) - if err != nil { - t.Fatal(err) - } - - // check the Merkle path - tamperedPosition := pos + 1 - err = s.VerifyOpening(uint64(tamperedPosition), openingProof, pp) - - return err != nil - - }, - gen.Int32Range(0, int32(rho*size)), - )) - - properties.Property("verifying correct opening should succeed", prop.ForAll( - - func(m int32) bool { - - _s := RADIX_2_FRI.New(uint64(size), sha256.New()) - s := _s.(radixTwoFri) - - p := randomPolynomial(uint64(size), m) - - pos := uint64(m % int32(size)) - pp, _ := s.BuildProofOfProximity(p) - - openingProof, err := s.Open(p, uint64(pos)) - if err != nil { - t.Fatal(err) - } - - // check the Merkle path - err = s.VerifyOpening(uint64(pos), openingProof, pp) - - return err == nil - - }, - gen.Int32Range(0, int32(rho*size)), - )) - - properties.Property("The claimed value of a polynomial should match P(x)", prop.ForAll( - func(m int32) bool { - - _s := RADIX_2_FRI.New(uint64(size), sha256.New()) - s := _s.(radixTwoFri) - - p := randomPolynomial(uint64(size), m) - - // check the opening value - var g fr.Element - pos := int64(m % 4096) - g.Set(&s.domain.Generator) - g.Exp(g, big.NewInt(pos)) - - var val fr.Element - for i := len(p) - 1; i >= 0; i-- { - val.Mul(&val, &g) - val.Add(&p[i], &val) - } - - openingProof, err := s.Open(p, uint64(pos)) - if err != nil { - t.Fatal(err) - } - - return openingProof.ClaimedValue.Equal(&val) - - }, - gen.Int32Range(0, int32(rho*size)), - )) - - properties.Property("Derive queries position: points should belong the correct fiber", prop.ForAll( - - func(m int32) bool { - - _s := RADIX_2_FRI.New(uint64(size), sha256.New()) - s := _s.(radixTwoFri) - - var g fr.Element - - _m := int(m) % size - pos := s.deriveQueriesPositions(_m, int(s.domain.Cardinality)) - g.Set(&s.domain.Generator) - n := int(s.domain.Cardinality) - - for i := 0; i < len(pos)-1; i++ { - - u, v := logFiber(pos[i], n) - - var g1, g2, 
g3 fr.Element - g1.Exp(g, &u).Square(&g1) - g2.Exp(g, &v).Square(&g2) - nextPos := convertSortedCanonical(pos[i+1], n/2) - g3.Square(&g).Exp(g3, big.NewInt(int64(nextPos))) - - if !g1.Equal(&g2) || !g1.Equal(&g3) { - return false - } - g.Square(&g) - n = n >> 1 - } - return true - }, - gen.Int32Range(0, int32(rho*size)), - )) - - properties.Property("verifying a correctly formed proof should succeed", prop.ForAll( - - func(s int32) bool { - - p := randomPolynomial(uint64(size), s) - - iop := RADIX_2_FRI.New(uint64(size), sha256.New()) - proof, err := iop.BuildProofOfProximity(p) - if err != nil { - t.Fatal(err) - } - - err = iop.VerifyProofOfProximity(proof) - return err == nil - }, - gen.Int32Range(0, int32(rho*size)), - )) - - properties.TestingRun(t, gopter.ConsoleReporter(false)) - -} - -// Benchmarks - -func BenchmarkProximityVerification(b *testing.B) { - - baseSize := 16 - - for i := 0; i < 10; i++ { - - size := baseSize << i - p := make([]fr.Element, size) - for k := 0; k < size; k++ { - p[k].SetRandom() - } - - iop := RADIX_2_FRI.New(uint64(size), sha256.New()) - proof, _ := iop.BuildProofOfProximity(p) - - b.Run(fmt.Sprintf("Polynomial size %d", size), func(b *testing.B) { - b.ResetTimer() - for l := 0; l < b.N; l++ { - iop.VerifyProofOfProximity(proof) - } - }) - - } -} diff --git a/tools/gnark/bn254/fr/gkr/gkr.go b/tools/gnark/bn254/fr/gkr/gkr.go deleted file mode 100644 index 4a75a861..00000000 --- a/tools/gnark/bn254/fr/gkr/gkr.go +++ /dev/null @@ -1,774 +0,0 @@ -// Copyright 2020 ConsenSys Software Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by consensys/gnark-crypto DO NOT EDIT - -package gkr - -import ( - "fmt" - "github.com/consensys/gnark-crypto/ecc/bn254/fr" - "github.com/consensys/gnark-crypto/ecc/bn254/fr/polynomial" - "github.com/consensys/gnark-crypto/ecc/bn254/fr/sumcheck" - fiatshamir "github.com/consensys/gnark-crypto/fiat-shamir" - "strconv" - "sync" -) - -// The goal is to prove/verify evaluations of many instances of the same circuit - -// Gate must be a low-degree polynomial -type Gate interface { - Evaluate(...fr.Element) fr.Element - Degree() int -} - -type Wire struct { - Gate Gate - Inputs []*Wire // if there are no Inputs, the wire is assumed an input wire - nbUniqueOutputs int // number of other wires using it as input, not counting duplicates (i.e. 
providing two inputs to the same gate counts as one) -} - -type Circuit []Wire - -func (w Wire) IsInput() bool { - return len(w.Inputs) == 0 -} - -func (w Wire) IsOutput() bool { - return w.nbUniqueOutputs == 0 -} - -func (w Wire) NbClaims() int { - if w.IsOutput() { - return 1 - } - return w.nbUniqueOutputs -} - -func (w Wire) noProof() bool { - return w.IsInput() && w.NbClaims() == 1 -} - -// WireAssignment is assignment of values to the same wire across many instances of the circuit -type WireAssignment map[*Wire]polynomial.MultiLin - -type Proof []sumcheck.Proof // for each layer, for each wire, a sumcheck (for each variable, a polynomial) - -type eqTimesGateEvalSumcheckLazyClaims struct { - wire *Wire - evaluationPoints [][]fr.Element - claimedEvaluations []fr.Element - manager *claimsManager // WARNING: Circular references -} - -func (e *eqTimesGateEvalSumcheckLazyClaims) ClaimsNum() int { - return len(e.evaluationPoints) -} - -func (e *eqTimesGateEvalSumcheckLazyClaims) VarsNum() int { - return len(e.evaluationPoints[0]) -} - -func (e *eqTimesGateEvalSumcheckLazyClaims) CombinedSum(a fr.Element) fr.Element { - evalsAsPoly := polynomial.Polynomial(e.claimedEvaluations) - return evalsAsPoly.Eval(&a) -} - -func (e *eqTimesGateEvalSumcheckLazyClaims) Degree(int) int { - return 1 + e.wire.Gate.Degree() -} - -func (e *eqTimesGateEvalSumcheckLazyClaims) VerifyFinalEval(r []fr.Element, combinationCoeff fr.Element, purportedValue fr.Element, proof interface{}) error { - inputEvaluationsNoRedundancy := proof.([]fr.Element) - - // the eq terms - numClaims := len(e.evaluationPoints) - evaluation := polynomial.EvalEq(e.evaluationPoints[numClaims-1], r) - for i := numClaims - 2; i >= 0; i-- { - evaluation.Mul(&evaluation, &combinationCoeff) - eq := polynomial.EvalEq(e.evaluationPoints[i], r) - evaluation.Add(&evaluation, &eq) - } - - // the g(...) term - var gateEvaluation fr.Element - if e.wire.IsInput() { - gateEvaluation = e.manager.assignment[e.wire].Evaluate(r, e.manager.memPool) - } else { - inputEvaluations := make([]fr.Element, len(e.wire.Inputs)) - indexesInProof := make(map[*Wire]int, len(inputEvaluationsNoRedundancy)) - - proofI := 0 - for inI, in := range e.wire.Inputs { - indexInProof, found := indexesInProof[in] - if !found { - indexInProof = proofI - indexesInProof[in] = indexInProof - - // defer verification, store new claim - e.manager.add(in, r, inputEvaluationsNoRedundancy[indexInProof]) - proofI++ - } - inputEvaluations[inI] = inputEvaluationsNoRedundancy[indexInProof] - } - if proofI != len(inputEvaluationsNoRedundancy) { - return fmt.Errorf("%d input wire evaluations given, %d expected", len(inputEvaluationsNoRedundancy), proofI) - } - gateEvaluation = e.wire.Gate.Evaluate(inputEvaluations...) 
- } - - evaluation.Mul(&evaluation, &gateEvaluation) - - if evaluation.Equal(&purportedValue) { - return nil - } - return fmt.Errorf("incompatible evaluations") -} - -type eqTimesGateEvalSumcheckClaims struct { - wire *Wire - evaluationPoints [][]fr.Element // x in the paper - claimedEvaluations []fr.Element // y in the paper - manager *claimsManager - - inputPreprocessors []polynomial.MultiLin // P_u in the paper, so that we don't need to pass along all the circuit's evaluations - - eq polynomial.MultiLin // ∑_i τ_i eq(x_i, -) -} - -func (c *eqTimesGateEvalSumcheckClaims) Combine(combinationCoeff fr.Element) polynomial.Polynomial { - varsNum := c.VarsNum() - eqLength := 1 << varsNum - claimsNum := c.ClaimsNum() - // initialize the eq tables - c.eq = c.manager.memPool.Make(eqLength) - - c.eq[0].SetOne() - c.eq.Eq(c.evaluationPoints[0]) - - newEq := polynomial.MultiLin(c.manager.memPool.Make(eqLength)) - aI := combinationCoeff - - for k := 1; k < claimsNum; k++ { //TODO: parallelizable? - // define eq_k = aᵏ eq(x_k1, ..., x_kn, *, ..., *) where x_ki are the evaluation points - newEq[0].Set(&aI) - newEq.Eq(c.evaluationPoints[k]) - - eqAsPoly := polynomial.Polynomial(c.eq) //just semantics - eqAsPoly.Add(eqAsPoly, polynomial.Polynomial(newEq)) - - if k+1 < claimsNum { - aI.Mul(&aI, &combinationCoeff) - } - } - - c.manager.memPool.Dump(newEq) - - // from this point on the claim is a rather simple one: g = E(h) × R_v (P_u0(h), ...) where E and the P_u are multilinear and R_v is of low-degree - - return c.computeGJ() -} - -// computeValAndStep returns val : i ↦ m(1, i...) and step : i ↦ m(1, i...) - m(0, i...) -func computeValAndStep(m polynomial.MultiLin, p *polynomial.Pool) (val polynomial.MultiLin, step polynomial.MultiLin) { - val = p.Clone(m[len(m)/2:]) - step = p.Clone(m[:len(m)/2]) - - valAsPoly, stepAsPoly := polynomial.Polynomial(val), polynomial.Polynomial(step) - - stepAsPoly.Sub(valAsPoly, stepAsPoly) - return -} - -// computeGJ: gⱼ = ∑_{0≤i<2ⁿ⁻ʲ} g(r₁, r₂, ..., rⱼ₋₁, Xⱼ, i...) = ∑_{0≤i<2ⁿ⁻ʲ} E(r₁, ..., X_j, i...) R_v( P_u0(r₁, ..., X_j, i...), ... ) where E = ∑ eq_k -// the polynomial is represented by the evaluations g_j(1), g_j(2), ..., g_j(deg(g_j)). -// The value g_j(0) is inferred from the equation g_j(0) + g_j(1) = g_{j-1}(r_{j-1}). By convention, g_0 is a constant polynomial equal to the claimed sum. -func (c *eqTimesGateEvalSumcheckClaims) computeGJ() (gJ polynomial.Polynomial) { - - // Let f ∈ { E(r₁, ..., X_j, d...) } ∪ {P_ul(r₁, ..., X_j, d...) }. 
It is linear in X_j, so f(m) = m×(f(1) - f(0)) + f(0), and f(0), f(1) are easily computed from the bookkeeping tables - EVal, EStep := computeValAndStep(c.eq, c.manager.memPool) - - puVal := make([]polynomial.MultiLin, len(c.inputPreprocessors)) //TODO: Make a two-dimensional array struct, and index it i-first rather than inputI first: would result in scanning memory access in the "d" loop and obviate the gateInput variable - puStep := make([]polynomial.MultiLin, len(c.inputPreprocessors)) //TODO, ctd: the greater degGJ, the more this would matter - - for i, puI := range c.inputPreprocessors { - puVal[i], puStep[i] = computeValAndStep(puI, c.manager.memPool) - } - - degGJ := 1 + c.wire.Gate.Degree() // guaranteed to be no smaller than the actual deg(g_j) - gJ = make([]fr.Element, degGJ) - - parallel := len(EVal) >= 1024 //TODO: Experiment with threshold - - var gateInput [][]fr.Element - - if parallel { - gateInput = [][]fr.Element{c.manager.memPool.Make(len(c.inputPreprocessors)), - c.manager.memPool.Make(len(c.inputPreprocessors))} - } else { - gateInput = [][]fr.Element{c.manager.memPool.Make(len(c.inputPreprocessors))} - } - - var wg sync.WaitGroup - - for d := 0; d < degGJ; d++ { - - notLastIteration := d+1 < degGJ - - sumOverI := func(res *fr.Element, gateInput []fr.Element, start, end int) { - for i := start; i < end; i++ { - - for inputI := range puVal { - gateInput[inputI].Set(&puVal[inputI][i]) - if notLastIteration { - puVal[inputI][i].Add(&puVal[inputI][i], &puStep[inputI][i]) - } - } - - // gJAtDI = gJ(d, i...) - gJAtDI := c.wire.Gate.Evaluate(gateInput...) - gJAtDI.Mul(&gJAtDI, &EVal[i]) - - res.Add(res, &gJAtDI) - - if notLastIteration { - EVal[i].Add(&EVal[i], &EStep[i]) - } - } - wg.Done() - } - - if parallel { - var firstHalf, secondHalf fr.Element - wg.Add(2) - go sumOverI(&secondHalf, gateInput[1], len(EVal)/2, len(EVal)) - go sumOverI(&firstHalf, gateInput[0], 0, len(EVal)/2) - wg.Wait() - gJ[d].Add(&firstHalf, &secondHalf) - } else { - wg.Add(1) // formalities - sumOverI(&gJ[d], gateInput[0], 0, len(EVal)) - } - } - - c.manager.memPool.Dump(gateInput...) 
- c.manager.memPool.Dump(EVal, EStep) - - for inputI := range puVal { - c.manager.memPool.Dump(puVal[inputI], puStep[inputI]) - } - - return -} - -// Next first folds the "preprocessing" and "eq" polynomials then compute the new g_j -func (c *eqTimesGateEvalSumcheckClaims) Next(element fr.Element) polynomial.Polynomial { - c.eq.Fold(element) - for i := 0; i < len(c.inputPreprocessors); i++ { - c.inputPreprocessors[i].Fold(element) - } - return c.computeGJ() -} - -func (c *eqTimesGateEvalSumcheckClaims) VarsNum() int { - return len(c.evaluationPoints[0]) -} - -func (c *eqTimesGateEvalSumcheckClaims) ClaimsNum() int { - return len(c.claimedEvaluations) -} - -func (c *eqTimesGateEvalSumcheckClaims) ProveFinalEval(r []fr.Element) interface{} { - - //defer the proof, return list of claims - evaluations := make([]fr.Element, 0, len(c.wire.Inputs)) - noMoreClaimsAllowed := make(map[*Wire]struct{}, len(c.inputPreprocessors)) - noMoreClaimsAllowed[c.wire] = struct{}{} - - for inI, in := range c.wire.Inputs { - puI := c.inputPreprocessors[inI] - if _, found := noMoreClaimsAllowed[in]; !found { - noMoreClaimsAllowed[in] = struct{}{} - puI.Fold(r[len(r)-1]) - c.manager.add(in, r, puI[0]) - evaluations = append(evaluations, puI[0]) - } - c.manager.memPool.Dump(puI) - } - - c.manager.memPool.Dump(c.claimedEvaluations, c.eq) - - return evaluations -} - -type claimsManager struct { - claimsMap map[*Wire]*eqTimesGateEvalSumcheckLazyClaims - assignment WireAssignment - memPool *polynomial.Pool -} - -func newClaimsManager(c Circuit, assignment WireAssignment, pool *polynomial.Pool) (claims claimsManager) { - claims.assignment = assignment - claims.claimsMap = make(map[*Wire]*eqTimesGateEvalSumcheckLazyClaims, len(c)) - claims.memPool = pool - - for i := range c { - wire := &c[i] - - claims.claimsMap[wire] = &eqTimesGateEvalSumcheckLazyClaims{ - wire: wire, - evaluationPoints: make([][]fr.Element, 0, wire.NbClaims()), - claimedEvaluations: claims.memPool.Make(wire.NbClaims()), - manager: &claims, - } - } - return -} - -func (m *claimsManager) add(wire *Wire, evaluationPoint []fr.Element, evaluation fr.Element) { - claim := m.claimsMap[wire] - i := len(claim.evaluationPoints) - claim.claimedEvaluations[i] = evaluation - claim.evaluationPoints = append(claim.evaluationPoints, evaluationPoint) -} - -func (m *claimsManager) getLazyClaim(wire *Wire) *eqTimesGateEvalSumcheckLazyClaims { - return m.claimsMap[wire] -} - -func (m *claimsManager) getClaim(wire *Wire) *eqTimesGateEvalSumcheckClaims { - lazy := m.claimsMap[wire] - res := &eqTimesGateEvalSumcheckClaims{ - wire: wire, - evaluationPoints: lazy.evaluationPoints, - claimedEvaluations: lazy.claimedEvaluations, - manager: m, - } - - if wire.IsInput() { - res.inputPreprocessors = []polynomial.MultiLin{m.memPool.Clone(m.assignment[wire])} - } else { - res.inputPreprocessors = make([]polynomial.MultiLin, len(wire.Inputs)) - - for inputI, inputW := range wire.Inputs { - res.inputPreprocessors[inputI] = m.memPool.Clone(m.assignment[inputW]) //will be edited later, so must be deep copied - } - } - return res -} - -func (m *claimsManager) deleteClaim(wire *Wire) { - delete(m.claimsMap, wire) -} - -type settings struct { - pool *polynomial.Pool - sorted []*Wire - transcript *fiatshamir.Transcript - transcriptPrefix string - nbVars int -} - -type Option func(*settings) - -func WithPool(pool *polynomial.Pool) Option { - return func(options *settings) { - options.pool = pool - } -} - -func WithSortedCircuit(sorted []*Wire) Option { - return func(options *settings) { - 
options.sorted = sorted - } -} - -func setup(c Circuit, assignment WireAssignment, transcriptSettings fiatshamir.Settings, options ...Option) (settings, error) { - var o settings - var err error - for _, option := range options { - option(&o) - } - - o.nbVars = assignment.NumVars() - nbInstances := assignment.NumInstances() - if 1< b { - return a - } - return b -} - -func ChallengeNames(sorted []*Wire, logNbInstances int, prefix string) []string { - - // Pre-compute the size TODO: Consider not doing this and just grow the list by appending - size := logNbInstances // first challenge - - for _, w := range sorted { - if w.noProof() { // no proof, no challenge - continue - } - if w.NbClaims() > 1 { //combine the claims - size++ - } - size += logNbInstances // full run of sumcheck on logNbInstances variables - } - - nums := make([]string, max(len(sorted), logNbInstances)) - for i := range nums { - nums[i] = strconv.Itoa(i) - } - - challenges := make([]string, size) - - // output wire claims - firstChallengePrefix := prefix + "fC." - for j := 0; j < logNbInstances; j++ { - challenges[j] = firstChallengePrefix + nums[j] - } - j := logNbInstances - for i := len(sorted) - 1; i >= 0; i-- { - if sorted[i].noProof() { - continue - } - wirePrefix := prefix + "w" + nums[i] + "." - - if sorted[i].NbClaims() > 1 { - challenges[j] = wirePrefix + "comb" - j++ - } - - partialSumPrefix := wirePrefix + "pSP." - for k := 0; k < logNbInstances; k++ { - challenges[j] = partialSumPrefix + nums[k] - j++ - } - } - return challenges -} - -func getFirstChallengeNames(logNbInstances int, prefix string) []string { - res := make([]string, logNbInstances) - firstChallengePrefix := prefix + "fC." - for i := 0; i < logNbInstances; i++ { - res[i] = firstChallengePrefix + strconv.Itoa(i) - } - return res -} - -func getChallenges(transcript *fiatshamir.Transcript, names []string) ([]fr.Element, error) { - res := make([]fr.Element, len(names)) - for i, name := range names { - if bytes, err := transcript.ComputeChallenge(name); err == nil { - res[i].SetBytes(bytes) - } else { - return nil, err - } - } - return res, nil -} - -// Prove consistency of the claimed assignment -func Prove(c Circuit, assignment WireAssignment, transcriptSettings fiatshamir.Settings, options ...Option) (Proof, error) { - o, err := setup(c, assignment, transcriptSettings, options...) 
- if err != nil { - return nil, err - } - - claims := newClaimsManager(c, assignment, o.pool) - - proof := make(Proof, len(c)) - // firstChallenge called rho in the paper - var firstChallenge []fr.Element - firstChallenge, err = getChallenges(o.transcript, getFirstChallengeNames(o.nbVars, o.transcriptPrefix)) - if err != nil { - return nil, err - } - - wirePrefix := o.transcriptPrefix + "w" - var baseChallenge [][]byte - for i := len(c) - 1; i >= 0; i-- { - - wire := o.sorted[i] - - if wire.IsOutput() { - claims.add(wire, firstChallenge, assignment[wire].Evaluate(firstChallenge, claims.memPool)) - } - - claim := claims.getClaim(wire) - if wire.noProof() { // input wires with one claim only - proof[i] = sumcheck.Proof{ - PartialSumPolys: []polynomial.Polynomial{}, - FinalEvalProof: []fr.Element{}, - } - } else { - if proof[i], err = sumcheck.Prove( - claim, fiatshamir.WithTranscript(o.transcript, wirePrefix+strconv.Itoa(i)+".", baseChallenge...), - ); err != nil { - return proof, err - } - - finalEvalProof := proof[i].FinalEvalProof.([]fr.Element) - baseChallenge = make([][]byte, len(finalEvalProof)) - for j := range finalEvalProof { - bytes := finalEvalProof[j].Bytes() - baseChallenge[j] = bytes[:] - } - } - // the verifier checks a single claim about input wires itself - claims.deleteClaim(wire) - } - - return proof, nil -} - -// Verify the consistency of the claimed output with the claimed input -// Unlike in Prove, the assignment argument need not be complete -func Verify(c Circuit, assignment WireAssignment, proof Proof, transcriptSettings fiatshamir.Settings, options ...Option) error { - o, err := setup(c, assignment, transcriptSettings, options...) - if err != nil { - return err - } - - claims := newClaimsManager(c, assignment, o.pool) - - var firstChallenge []fr.Element - firstChallenge, err = getChallenges(o.transcript, getFirstChallengeNames(o.nbVars, o.transcriptPrefix)) - if err != nil { - return err - } - - wirePrefix := o.transcriptPrefix + "w" - var baseChallenge [][]byte - for i := len(c) - 1; i >= 0; i-- { - wire := o.sorted[i] - - if wire.IsOutput() { - claims.add(wire, firstChallenge, assignment[wire].Evaluate(firstChallenge, claims.memPool)) - } - - proofW := proof[i] - finalEvalProof := proofW.FinalEvalProof.([]fr.Element) - claim := claims.getLazyClaim(wire) - if wire.noProof() { // input wires with one claim only - // make sure the proof is empty - if len(finalEvalProof) != 0 || len(proofW.PartialSumPolys) != 0 { - return fmt.Errorf("no proof allowed for input wire with a single claim") - } - - if wire.NbClaims() == 1 { // input wire - // simply evaluate and see if it matches - evaluation := assignment[wire].Evaluate(claim.evaluationPoints[0], claims.memPool) - if !claim.claimedEvaluations[0].Equal(&evaluation) { - return fmt.Errorf("incorrect input wire claim") - } - } - } else if err = sumcheck.Verify( - claim, proof[i], fiatshamir.WithTranscript(o.transcript, wirePrefix+strconv.Itoa(i)+".", baseChallenge...), - ); err == nil { - baseChallenge = make([][]byte, len(finalEvalProof)) - for j := range finalEvalProof { - bytes := finalEvalProof[j].Bytes() - baseChallenge[j] = bytes[:] - } - } else { - return fmt.Errorf("sumcheck proof rejected: %v", err) //TODO: Any polynomials to dump? - } - claims.deleteClaim(wire) - } - return nil -} - -type IdentityGate struct{} - -func (IdentityGate) Evaluate(input ...fr.Element) fr.Element { - return input[0] -} - -func (IdentityGate) Degree() int { - return 1 -} - -// outputsList also sets the nbUniqueOutputs fields. 
It also sets the wire metadata. -func outputsList(c Circuit, indexes map[*Wire]int) [][]int { - res := make([][]int, len(c)) - for i := range c { - res[i] = make([]int, 0) - c[i].nbUniqueOutputs = 0 - if c[i].IsInput() { - c[i].Gate = IdentityGate{} - } - } - ins := make(map[int]struct{}, len(c)) - for i := range c { - for k := range ins { // clear map - delete(ins, k) - } - for _, in := range c[i].Inputs { - inI := indexes[in] - res[inI] = append(res[inI], i) - if _, ok := ins[inI]; !ok { - in.nbUniqueOutputs++ - ins[inI] = struct{}{} - } - } - } - return res -} - -type topSortData struct { - outputs [][]int - status []int // status > 0 indicates number of inputs left to be ready. status = 0 means ready. status = -1 means done - index map[*Wire]int - leastReady int -} - -func (d *topSortData) markDone(i int) { - - d.status[i] = -1 - - for _, outI := range d.outputs[i] { - d.status[outI]-- - if d.status[outI] == 0 && outI < d.leastReady { - d.leastReady = outI - } - } - - for d.leastReady < len(d.status) && d.status[d.leastReady] != 0 { - d.leastReady++ - } -} - -func indexMap(c Circuit) map[*Wire]int { - res := make(map[*Wire]int, len(c)) - for i := range c { - res[&c[i]] = i - } - return res -} - -func statusList(c Circuit) []int { - res := make([]int, len(c)) - for i := range c { - res[i] = len(c[i].Inputs) - } - return res -} - -// topologicalSort sorts the wires in order of dependence. Such that for any wire, any one it depends on -// occurs before it. It tries to stick to the input order as much as possible. An already sorted list will remain unchanged. -// It also sets the nbOutput flags, and a dummy IdentityGate for input wires. -// Worst-case inefficient O(n^2), but that probably won't matter since the circuits are small. -// Furthermore, it is efficient with already-close-to-sorted lists, which are the expected input -func topologicalSort(c Circuit) []*Wire { - var data topSortData - data.index = indexMap(c) - data.outputs = outputsList(c, data.index) - data.status = statusList(c) - sorted := make([]*Wire, len(c)) - - for data.leastReady = 0; data.status[data.leastReady] != 0; data.leastReady++ { - } - - for i := range c { - sorted[i] = &c[data.leastReady] - data.markDone(data.leastReady) - } - - return sorted -} - -// Complete the circuit evaluation from input values -func (a WireAssignment) Complete(c Circuit) WireAssignment { - - sortedWires := topologicalSort(c) - - numEvaluations := 0 - - for _, w := range sortedWires { - if !w.IsInput() { - if numEvaluations == 0 { - numEvaluations = len(a[w.Inputs[0]]) - } - evals := make([]fr.Element, numEvaluations) - ins := make([]fr.Element, len(w.Inputs)) - for k := 0; k < numEvaluations; k++ { - for inI, in := range w.Inputs { - ins[inI] = a[in][k] - } - evals[k] = w.Gate.Evaluate(ins...) - } - a[w] = evals - } - } - return a -} - -func (a WireAssignment) NumInstances() int { - for _, aW := range a { - return len(aW) - } - panic("empty assignment") -} - -func (a WireAssignment) NumVars() int { - for _, aW := range a { - return aW.NumVars() - } - panic("empty assignment") -} diff --git a/tools/gnark/bn254/fr/gkr/gkr_test.go b/tools/gnark/bn254/fr/gkr/gkr_test.go deleted file mode 100644 index 557a8ed0..00000000 --- a/tools/gnark/bn254/fr/gkr/gkr_test.go +++ /dev/null @@ -1,722 +0,0 @@ -// Copyright 2020 ConsenSys Software Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by consensys/gnark-crypto DO NOT EDIT - -package gkr - -import ( - "encoding/json" - "fmt" - "github.com/consensys/gnark-crypto/ecc/bn254/fr" - "github.com/consensys/gnark-crypto/ecc/bn254/fr/mimc" - "github.com/consensys/gnark-crypto/ecc/bn254/fr/polynomial" - "github.com/consensys/gnark-crypto/ecc/bn254/fr/sumcheck" - "github.com/consensys/gnark-crypto/ecc/bn254/fr/test_vector_utils" - fiatshamir "github.com/consensys/gnark-crypto/fiat-shamir" - "github.com/stretchr/testify/assert" - "os" - "path/filepath" - "reflect" - "strconv" - "testing" -) - -func TestNoGateTwoInstances(t *testing.T) { - // Testing a single instance is not possible because the sumcheck implementation doesn't cover the trivial 0-variate case - testNoGate(t, []fr.Element{four, three}) -} - -func TestNoGate(t *testing.T) { - testManyInstances(t, 1, testNoGate) -} - -func TestSingleMulGateTwoInstances(t *testing.T) { - testSingleMulGate(t, []fr.Element{four, three}, []fr.Element{two, three}) -} - -func TestSingleMulGate(t *testing.T) { - testManyInstances(t, 2, testSingleMulGate) -} - -func TestSingleInputTwoIdentityGatesTwoInstances(t *testing.T) { - - testSingleInputTwoIdentityGates(t, []fr.Element{two, three}) -} - -func TestSingleInputTwoIdentityGates(t *testing.T) { - - testManyInstances(t, 2, testSingleInputTwoIdentityGates) -} - -func TestSingleInputTwoIdentityGatesComposedTwoInstances(t *testing.T) { - testSingleInputTwoIdentityGatesComposed(t, []fr.Element{two, one}) -} - -func TestSingleInputTwoIdentityGatesComposed(t *testing.T) { - testManyInstances(t, 1, testSingleInputTwoIdentityGatesComposed) -} - -func TestSingleMimcCipherGateTwoInstances(t *testing.T) { - testSingleMimcCipherGate(t, []fr.Element{one, one}, []fr.Element{one, two}) -} - -func TestSingleMimcCipherGate(t *testing.T) { - testManyInstances(t, 2, testSingleMimcCipherGate) -} - -func TestATimesBSquaredTwoInstances(t *testing.T) { - testATimesBSquared(t, 2, []fr.Element{one, one}, []fr.Element{one, two}) -} - -func TestShallowMimcTwoInstances(t *testing.T) { - testMimc(t, 2, []fr.Element{one, one}, []fr.Element{one, two}) -} -func TestMimcTwoInstances(t *testing.T) { - testMimc(t, 93, []fr.Element{one, one}, []fr.Element{one, two}) -} - -func TestMimc(t *testing.T) { - testManyInstances(t, 2, generateTestMimc(93)) -} - -func generateTestMimc(numRounds int) func(*testing.T, ...[]fr.Element) { - return func(t *testing.T, inputAssignments ...[]fr.Element) { - testMimc(t, numRounds, inputAssignments...) 
- } -} - -func TestSumcheckFromSingleInputTwoIdentityGatesGateTwoInstances(t *testing.T) { - circuit := Circuit{Wire{ - Gate: IdentityGate{}, - Inputs: []*Wire{}, - nbUniqueOutputs: 2, - }} - - wire := &circuit[0] - - assignment := WireAssignment{&circuit[0]: []fr.Element{two, three}} - pool := polynomial.NewPool(256, 1<<11) - - claimsManagerGen := func() *claimsManager { - manager := newClaimsManager(circuit, assignment, &pool) - manager.add(wire, []fr.Element{three}, five) - manager.add(wire, []fr.Element{four}, six) - return &manager - } - - transcriptGen := test_vector_utils.NewMessageCounterGenerator(4, 1) - - proof, err := sumcheck.Prove(claimsManagerGen().getClaim(wire), fiatshamir.WithHash(transcriptGen(), nil)) - assert.NoError(t, err) - err = sumcheck.Verify(claimsManagerGen().getLazyClaim(wire), proof, fiatshamir.WithHash(transcriptGen(), nil)) - assert.NoError(t, err) -} - -var one, two, three, four, five, six fr.Element - -func init() { - one.SetOne() - two.Double(&one) - three.Add(&two, &one) - four.Double(&two) - five.Add(&three, &two) - six.Double(&three) -} - -var testManyInstancesLogMaxInstances = -1 - -func getLogMaxInstances(t *testing.T) int { - if testManyInstancesLogMaxInstances == -1 { - - s := os.Getenv("GKR_LOG_INSTANCES") - if s == "" { - testManyInstancesLogMaxInstances = 5 - } else { - var err error - testManyInstancesLogMaxInstances, err = strconv.Atoi(s) - if err != nil { - t.Error(err) - } - } - - } - return testManyInstancesLogMaxInstances -} - -func testManyInstances(t *testing.T, numInput int, test func(*testing.T, ...[]fr.Element)) { - fullAssignments := make([][]fr.Element, numInput) - maxSize := 1 << getLogMaxInstances(t) - - t.Log("Entered test orchestrator, assigning and randomizing inputs") - - for i := range fullAssignments { - fullAssignments[i] = make([]fr.Element, maxSize) - setRandom(fullAssignments[i]) - } - - inputAssignments := make([][]fr.Element, numInput) - for numEvals := maxSize; numEvals <= maxSize; numEvals *= 2 { - for i, fullAssignment := range fullAssignments { - inputAssignments[i] = fullAssignment[:numEvals] - } - - t.Log("Selected inputs for test") - test(t, inputAssignments...) 
- } -} - -func testNoGate(t *testing.T, inputAssignments ...[]fr.Element) { - c := Circuit{ - { - Inputs: []*Wire{}, - Gate: nil, - }, - } - - assignment := WireAssignment{&c[0]: inputAssignments[0]} - - proof, err := Prove(c, assignment, fiatshamir.WithHash(test_vector_utils.NewMessageCounter(1, 1))) - assert.NoError(t, err) - - // Even though a hash is called here, the proof is empty - - err = Verify(c, assignment, proof, fiatshamir.WithHash(test_vector_utils.NewMessageCounter(1, 1))) - assert.NoError(t, err, "proof rejected") -} - -func testSingleMulGate(t *testing.T, inputAssignments ...[]fr.Element) { - - c := make(Circuit, 3) - c[2] = Wire{ - Gate: mulGate{}, - Inputs: []*Wire{&c[0], &c[1]}, - } - - assignment := WireAssignment{&c[0]: inputAssignments[0], &c[1]: inputAssignments[1]}.Complete(c) - - proof, err := Prove(c, assignment, fiatshamir.WithHash(test_vector_utils.NewMessageCounter(1, 1))) - assert.NoError(t, err) - - err = Verify(c, assignment, proof, fiatshamir.WithHash(test_vector_utils.NewMessageCounter(1, 1))) - assert.NoError(t, err, "proof rejected") - - err = Verify(c, assignment, proof, fiatshamir.WithHash(test_vector_utils.NewMessageCounter(0, 1))) - assert.NotNil(t, err, "bad proof accepted") -} - -func testSingleInputTwoIdentityGates(t *testing.T, inputAssignments ...[]fr.Element) { - c := make(Circuit, 3) - - c[1] = Wire{ - Gate: IdentityGate{}, - Inputs: []*Wire{&c[0]}, - } - - c[2] = Wire{ - Gate: IdentityGate{}, - Inputs: []*Wire{&c[0]}, - } - - assignment := WireAssignment{&c[0]: inputAssignments[0]}.Complete(c) - - proof, err := Prove(c, assignment, fiatshamir.WithHash(test_vector_utils.NewMessageCounter(0, 1))) - assert.NoError(t, err) - - err = Verify(c, assignment, proof, fiatshamir.WithHash(test_vector_utils.NewMessageCounter(0, 1))) - assert.NoError(t, err, "proof rejected") - - err = Verify(c, assignment, proof, fiatshamir.WithHash(test_vector_utils.NewMessageCounter(1, 1))) - assert.NotNil(t, err, "bad proof accepted") -} - -func testSingleMimcCipherGate(t *testing.T, inputAssignments ...[]fr.Element) { - c := make(Circuit, 3) - - c[2] = Wire{ - Gate: mimcCipherGate{}, - Inputs: []*Wire{&c[0], &c[1]}, - } - - t.Log("Evaluating all circuit wires") - assignment := WireAssignment{&c[0]: inputAssignments[0], &c[1]: inputAssignments[1]}.Complete(c) - t.Log("Circuit evaluation complete") - proof, err := Prove(c, assignment, fiatshamir.WithHash(test_vector_utils.NewMessageCounter(0, 1))) - assert.NoError(t, err) - t.Log("Proof complete") - err = Verify(c, assignment, proof, fiatshamir.WithHash(test_vector_utils.NewMessageCounter(0, 1))) - assert.NoError(t, err, "proof rejected") - - t.Log("Successful verification complete") - err = Verify(c, assignment, proof, fiatshamir.WithHash(test_vector_utils.NewMessageCounter(1, 1))) - assert.NotNil(t, err, "bad proof accepted") - t.Log("Unsuccessful verification complete") -} - -func testSingleInputTwoIdentityGatesComposed(t *testing.T, inputAssignments ...[]fr.Element) { - c := make(Circuit, 3) - - c[1] = Wire{ - Gate: IdentityGate{}, - Inputs: []*Wire{&c[0]}, - } - c[2] = Wire{ - Gate: IdentityGate{}, - Inputs: []*Wire{&c[1]}, - } - - assignment := WireAssignment{&c[0]: inputAssignments[0]}.Complete(c) - - proof, err := Prove(c, assignment, fiatshamir.WithHash(test_vector_utils.NewMessageCounter(0, 1))) - assert.NoError(t, err) - - err = Verify(c, assignment, proof, fiatshamir.WithHash(test_vector_utils.NewMessageCounter(0, 1))) - assert.NoError(t, err, "proof rejected") - - err = Verify(c, assignment, proof, 
fiatshamir.WithHash(test_vector_utils.NewMessageCounter(1, 1))) - assert.NotNil(t, err, "bad proof accepted") -} - -func mimcCircuit(numRounds int) Circuit { - c := make(Circuit, numRounds+2) - - for i := 2; i < len(c); i++ { - c[i] = Wire{ - Gate: mimcCipherGate{}, - Inputs: []*Wire{&c[i-1], &c[0]}, - } - } - return c -} - -func testMimc(t *testing.T, numRounds int, inputAssignments ...[]fr.Element) { - //TODO: Implement mimc correctly. Currently, the computation is mimc(a,b) = cipher( cipher( ... cipher(a, b), b) ..., b) - // @AlexandreBelling: Please explain the extra layers in https://github.com/ConsenSys/gkr-mimc/blob/81eada039ab4ed403b7726b535adb63026e8011f/examples/mimc.go#L10 - - c := mimcCircuit(numRounds) - - t.Log("Evaluating all circuit wires") - assignment := WireAssignment{&c[0]: inputAssignments[0], &c[1]: inputAssignments[1]}.Complete(c) - t.Log("Circuit evaluation complete") - - proof, err := Prove(c, assignment, fiatshamir.WithHash(test_vector_utils.NewMessageCounter(0, 1))) - assert.NoError(t, err) - - t.Log("Proof finished") - err = Verify(c, assignment, proof, fiatshamir.WithHash(test_vector_utils.NewMessageCounter(0, 1))) - assert.NoError(t, err, "proof rejected") - - t.Log("Successful verification finished") - err = Verify(c, assignment, proof, fiatshamir.WithHash(test_vector_utils.NewMessageCounter(1, 1))) - assert.NotNil(t, err, "bad proof accepted") - t.Log("Unsuccessful verification finished") -} - -func testATimesBSquared(t *testing.T, numRounds int, inputAssignments ...[]fr.Element) { - // This imitates the MiMC circuit - - c := make(Circuit, numRounds+2) - - for i := 2; i < len(c); i++ { - c[i] = Wire{ - Gate: mulGate{}, - Inputs: []*Wire{&c[i-1], &c[0]}, - } - } - - assignment := WireAssignment{&c[0]: inputAssignments[0], &c[1]: inputAssignments[1]}.Complete(c) - - proof, err := Prove(c, assignment, fiatshamir.WithHash(test_vector_utils.NewMessageCounter(0, 1))) - assert.NoError(t, err) - - err = Verify(c, assignment, proof, fiatshamir.WithHash(test_vector_utils.NewMessageCounter(0, 1))) - assert.NoError(t, err, "proof rejected") - - err = Verify(c, assignment, proof, fiatshamir.WithHash(test_vector_utils.NewMessageCounter(1, 1))) - assert.NotNil(t, err, "bad proof accepted") -} - -func setRandom(slice []fr.Element) { - for i := range slice { - slice[i].SetRandom() - } -} - -func generateTestProver(path string) func(t *testing.T) { - return func(t *testing.T) { - testCase, err := newTestCase(path) - assert.NoError(t, err) - proof, err := Prove(testCase.Circuit, testCase.FullAssignment, testCase.transcriptSetting()) - assert.NoError(t, err) - assert.NoError(t, proofEquals(testCase.Proof, proof)) - } -} - -func generateTestVerifier(path string) func(t *testing.T) { - return func(t *testing.T) { - testCase, err := newTestCase(path) - assert.NoError(t, err) - err = Verify(testCase.Circuit, testCase.InOutAssignment, testCase.Proof, testCase.transcriptSetting()) - assert.NoError(t, err, "proof rejected") - testCase, err = newTestCase(path) - assert.NoError(t, err) - err = Verify(testCase.Circuit, testCase.InOutAssignment, testCase.Proof, fiatshamir.WithHash(&test_vector_utils.MapHash{Map: testCase.Hash}, []byte{1})) - assert.NotNil(t, err, "bad proof accepted") - } -} - -func TestGkrVectors(t *testing.T) { - - testDirPath := "../../../../internal/generator/gkr/test_vectors" - dirEntries, err := os.ReadDir(testDirPath) - assert.NoError(t, err) - for _, dirEntry := range dirEntries { - if !dirEntry.IsDir() { - - if filepath.Ext(dirEntry.Name()) == ".json" { - path := 
filepath.Join(testDirPath, dirEntry.Name()) - noExt := dirEntry.Name()[:len(dirEntry.Name())-len(".json")] - - t.Run(noExt+"_prover", generateTestProver(path)) - t.Run(noExt+"_verifier", generateTestVerifier(path)) - - } - } - } -} - -func proofEquals(expected Proof, seen Proof) error { - if len(expected) != len(seen) { - return fmt.Errorf("length mismatch %d ≠ %d", len(expected), len(seen)) - } - for i, x := range expected { - xSeen := seen[i] - - if xSeen.FinalEvalProof == nil { - if seenFinalEval := x.FinalEvalProof.([]fr.Element); len(seenFinalEval) != 0 { - return fmt.Errorf("length mismatch %d ≠ %d", 0, len(seenFinalEval)) - } - } else { - if err := test_vector_utils.SliceEquals(x.FinalEvalProof.([]fr.Element), xSeen.FinalEvalProof.([]fr.Element)); err != nil { - return fmt.Errorf("final evaluation proof mismatch") - } - } - if err := test_vector_utils.PolynomialSliceEquals(x.PartialSumPolys, xSeen.PartialSumPolys); err != nil { - return err - } - } - return nil -} - -func BenchmarkGkrMimc(b *testing.B) { - const N = 1 << 19 - fmt.Println("creating circuit structure") - c := mimcCircuit(91) - - in0 := make([]fr.Element, N) - in1 := make([]fr.Element, N) - setRandom(in0) - setRandom(in1) - - fmt.Println("evaluating circuit") - assignment := WireAssignment{&c[0]: in0, &c[1]: in1}.Complete(c) - - //b.ResetTimer() - fmt.Println("constructing proof") - Prove(c, assignment, fiatshamir.WithHash(mimc.NewMiMC())) -} - -func TestTopSortTrivial(t *testing.T) { - c := make(Circuit, 2) - c[0].Inputs = []*Wire{&c[1]} - sorted := topologicalSort(c) - assert.Equal(t, []*Wire{&c[1], &c[0]}, sorted) -} - -func TestTopSortDeep(t *testing.T) { - c := make(Circuit, 4) - c[0].Inputs = []*Wire{&c[2]} - c[1].Inputs = []*Wire{&c[3]} - c[2].Inputs = []*Wire{} - c[3].Inputs = []*Wire{&c[0]} - sorted := topologicalSort(c) - assert.Equal(t, []*Wire{&c[2], &c[0], &c[3], &c[1]}, sorted) -} - -func TestTopSortWide(t *testing.T) { - c := make(Circuit, 10) - c[0].Inputs = []*Wire{&c[3], &c[8]} - c[1].Inputs = []*Wire{&c[6]} - c[2].Inputs = []*Wire{&c[4]} - c[3].Inputs = []*Wire{} - c[4].Inputs = []*Wire{} - c[5].Inputs = []*Wire{&c[9]} - c[6].Inputs = []*Wire{&c[9]} - c[7].Inputs = []*Wire{&c[9], &c[5], &c[2]} - c[8].Inputs = []*Wire{&c[4], &c[3]} - c[9].Inputs = []*Wire{} - - sorted := topologicalSort(c) - sortedExpected := []*Wire{&c[3], &c[4], &c[2], &c[8], &c[0], &c[9], &c[5], &c[6], &c[1], &c[7]} - - assert.Equal(t, sortedExpected, sorted) -} - -type WireInfo struct { - Gate string `json:"gate"` - Inputs []int `json:"inputs"` -} - -type CircuitInfo []WireInfo - -var circuitCache = make(map[string]Circuit) - -func getCircuit(path string) (Circuit, error) { - path, err := filepath.Abs(path) - if err != nil { - return nil, err - } - if circuit, ok := circuitCache[path]; ok { - return circuit, nil - } - var bytes []byte - if bytes, err = os.ReadFile(path); err == nil { - var circuitInfo CircuitInfo - if err = json.Unmarshal(bytes, &circuitInfo); err == nil { - circuit := circuitInfo.toCircuit() - circuitCache[path] = circuit - return circuit, nil - } else { - return nil, err - } - } else { - return nil, err - } -} - -func (c CircuitInfo) toCircuit() (circuit Circuit) { - circuit = make(Circuit, len(c)) - for i := range c { - circuit[i].Gate = gates[c[i].Gate] - circuit[i].Inputs = make([]*Wire, len(c[i].Inputs)) - for k, inputCoord := range c[i].Inputs { - input := &circuit[inputCoord] - circuit[i].Inputs[k] = input - } - } - return -} - -var gates map[string]Gate - -func init() { - gates = make(map[string]Gate) - 
gates["identity"] = IdentityGate{} - gates["mul"] = mulGate{} - gates["mimc"] = mimcCipherGate{} //TODO: Add ark - gates["select-input-3"] = _select(2) -} - -type mimcCipherGate struct { - ark fr.Element -} - -func (m mimcCipherGate) Evaluate(input ...fr.Element) (res fr.Element) { - var sum fr.Element - - sum. - Add(&input[0], &input[1]). - Add(&sum, &m.ark) - - res.Square(&sum) // sum^2 - res.Mul(&res, &sum) // sum^3 - res.Square(&res) //sum^6 - res.Mul(&res, &sum) //sum^7 - - return -} - -func (m mimcCipherGate) Degree() int { - return 7 -} - -type PrintableProof []PrintableSumcheckProof - -type PrintableSumcheckProof struct { - FinalEvalProof interface{} `json:"finalEvalProof"` - PartialSumPolys [][]interface{} `json:"partialSumPolys"` -} - -func unmarshalProof(printable PrintableProof) (Proof, error) { - proof := make(Proof, len(printable)) - for i := range printable { - finalEvalProof := []fr.Element(nil) - - if printable[i].FinalEvalProof != nil { - finalEvalSlice := reflect.ValueOf(printable[i].FinalEvalProof) - finalEvalProof = make([]fr.Element, finalEvalSlice.Len()) - for k := range finalEvalProof { - if _, err := test_vector_utils.SetElement(&finalEvalProof[k], finalEvalSlice.Index(k).Interface()); err != nil { - return nil, err - } - } - } - - proof[i] = sumcheck.Proof{ - PartialSumPolys: make([]polynomial.Polynomial, len(printable[i].PartialSumPolys)), - FinalEvalProof: finalEvalProof, - } - for k := range printable[i].PartialSumPolys { - var err error - if proof[i].PartialSumPolys[k], err = test_vector_utils.SliceToElementSlice(printable[i].PartialSumPolys[k]); err != nil { - return nil, err - } - } - } - return proof, nil -} - -type TestCase struct { - Circuit Circuit - Hash *test_vector_utils.ElementMap - Proof Proof - FullAssignment WireAssignment - InOutAssignment WireAssignment -} - -type TestCaseInfo struct { - Hash string `json:"hash"` - Circuit string `json:"circuit"` - Input [][]interface{} `json:"input"` - Output [][]interface{} `json:"output"` - Proof PrintableProof `json:"proof"` -} - -var testCases = make(map[string]*TestCase) - -func newTestCase(path string) (*TestCase, error) { - path, err := filepath.Abs(path) - if err != nil { - return nil, err - } - dir := filepath.Dir(path) - - tCase, ok := testCases[path] - if !ok { - var bytes []byte - if bytes, err = os.ReadFile(path); err == nil { - var info TestCaseInfo - err = json.Unmarshal(bytes, &info) - if err != nil { - return nil, err - } - - var circuit Circuit - if circuit, err = getCircuit(filepath.Join(dir, info.Circuit)); err != nil { - return nil, err - } - var _hash *test_vector_utils.ElementMap - if _hash, err = test_vector_utils.ElementMapFromFile(filepath.Join(dir, info.Hash)); err != nil { - return nil, err - } - var proof Proof - if proof, err = unmarshalProof(info.Proof); err != nil { - return nil, err - } - - fullAssignment := make(WireAssignment) - inOutAssignment := make(WireAssignment) - - sorted := topologicalSort(circuit) - - inI, outI := 0, 0 - for _, w := range sorted { - var assignmentRaw []interface{} - if w.IsInput() { - if inI == len(info.Input) { - return nil, fmt.Errorf("fewer input in vector than in circuit") - } - assignmentRaw = info.Input[inI] - inI++ - } else if w.IsOutput() { - if outI == len(info.Output) { - return nil, fmt.Errorf("fewer output in vector than in circuit") - } - assignmentRaw = info.Output[outI] - outI++ - } - if assignmentRaw != nil { - var wireAssignment []fr.Element - if wireAssignment, err = test_vector_utils.SliceToElementSlice(assignmentRaw); err != nil { 
- return nil, err - } - - fullAssignment[w] = wireAssignment - inOutAssignment[w] = wireAssignment - } - } - - fullAssignment.Complete(circuit) - - for _, w := range sorted { - if w.IsOutput() { - - if err = test_vector_utils.SliceEquals(inOutAssignment[w], fullAssignment[w]); err != nil { - return nil, fmt.Errorf("assignment mismatch: %v", err) - } - - } - } - - tCase = &TestCase{ - FullAssignment: fullAssignment, - InOutAssignment: inOutAssignment, - Proof: proof, - Hash: _hash, - Circuit: circuit, - } - - testCases[path] = tCase - } else { - return nil, err - } - } - - return tCase, nil -} - -func (c *TestCase) transcriptSetting(initialChallenge ...[]byte) fiatshamir.Settings { - return fiatshamir.WithHash(&test_vector_utils.MapHash{Map: c.Hash}, initialChallenge...) -} - -type mulGate struct{} - -func (g mulGate) Evaluate(element ...fr.Element) (result fr.Element) { - result.Mul(&element[0], &element[1]) - return -} - -func (g mulGate) Degree() int { - return 2 -} - -type _select int - -func (g _select) Evaluate(in ...fr.Element) fr.Element { - return in[g] -} - -func (g _select) Degree() int { - return 1 -} diff --git a/tools/gnark/bn254/fr/kzg/doc.go b/tools/gnark/bn254/fr/kzg/doc.go deleted file mode 100644 index d8a77e8f..00000000 --- a/tools/gnark/bn254/fr/kzg/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2020 ConsenSys Software Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by consensys/gnark-crypto DO NOT EDIT - -// Package kzg provides a KZG commitment scheme. -package kzg diff --git a/tools/gnark/bn254/fr/kzg/kzg.go b/tools/gnark/bn254/fr/kzg/kzg.go deleted file mode 100644 index 8de86fa9..00000000 --- a/tools/gnark/bn254/fr/kzg/kzg.go +++ /dev/null @@ -1,528 +0,0 @@ -// Copyright 2020 ConsenSys Software Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -// Code generated by consensys/gnark-crypto DO NOT EDIT - -package kzg - -import ( - "errors" - "hash" - "math/big" - "sync" - - "github.com/consensys/gnark-crypto/ecc" - "github.com/consensys/gnark-crypto/ecc/bn254" - "github.com/consensys/gnark-crypto/ecc/bn254/fr" - "github.com/consensys/gnark-crypto/fiat-shamir" -) - -var ( - ErrInvalidNbDigests = errors.New("number of digests is not the same as the number of polynomials") - ErrInvalidPolynomialSize = errors.New("invalid polynomial size (larger than SRS or == 0)") - ErrVerifyOpeningProof = errors.New("can't verify opening proof") - ErrVerifyBatchOpeningSinglePoint = errors.New("can't verify batch opening proof at single point") - ErrMinSRSSize = errors.New("minimum srs size is 2") -) - -// Digest commitment of a polynomial. -type Digest = bn254.G1Affine - -// SRS stores the result of the MPC -type SRS struct { - G1 []bn254.G1Affine // [G₁ [α]G₁ , [α²]G₁, ... ] - G2 [2]bn254.G2Affine // [G₂, [α]G₂ ] -} - -// eval returns p(point) where p is interpreted as a polynomial -// ∑_{i= 0; i-- { - res.Mul(&res, &point).Add(&res, &p[i]) - } - return res -} - -// NewSRS returns a new SRS using alpha as randomness source -// -// In production, a SRS generated through MPC should be used. -// -// implements io.ReaderFrom and io.WriterTo -func NewSRS(size uint64, bAlpha *big.Int) (*SRS, error) { - - if size < 2 { - return nil, ErrMinSRSSize - } - - var srs SRS - srs.G1 = make([]bn254.G1Affine, size) - - var alpha fr.Element - alpha.SetBigInt(bAlpha) - - _, _, gen1Aff, gen2Aff := bn254.Generators() - srs.G1[0] = gen1Aff - srs.G2[0] = gen2Aff - srs.G2[1].ScalarMultiplication(&gen2Aff, bAlpha) - - alphas := make([]fr.Element, size-1) - alphas[0] = alpha - for i := 1; i < len(alphas); i++ { - alphas[i].Mul(&alphas[i-1], &alpha) - } - g1s := bn254.BatchScalarMultiplicationG1(&gen1Aff, alphas) - copy(srs.G1[1:], g1s) - - return &srs, nil -} - -// OpeningProof KZG proof for opening at a single point. -// -// implements io.ReaderFrom and io.WriterTo -type OpeningProof struct { - // H quotient polynomial (f - f(z))/(x-z) - H bn254.G1Affine - - // ClaimedValue purported value - ClaimedValue fr.Element -} - -// BatchOpeningProof opening proof for many polynomials at the same point -// -// implements io.ReaderFrom and io.WriterTo -type BatchOpeningProof struct { - // H quotient polynomial Sum_i gamma**i*(f - f(z))/(x-z) - H bn254.G1Affine - - // ClaimedValues purported values - ClaimedValues []fr.Element -} - -// Commit commits to a polynomial using a multi exponentiation with the SRS. -// It is assumed that the polynomial is in canonical form, in Montgomery form. -func Commit(p []fr.Element, srs *SRS, nbTasks ...int) (Digest, error) { - - if len(p) == 0 || len(p) > len(srs.G1) { - return Digest{}, ErrInvalidPolynomialSize - } - - var res bn254.G1Affine - - config := ecc.MultiExpConfig{} - if len(nbTasks) > 0 { - config.NbTasks = nbTasks[0] - } - if _, err := res.MultiExp(srs.G1[:len(p)], p, config); err != nil { - return Digest{}, err - } - - return res, nil -} - -// Open computes an opening proof of polynomial p at given point. 
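// Editorial note: the doc comment on eval above was mangled by the diff ("∑_{i= 0; i--"
// is the remnant of "p(point) = ∑ᵢ p[i]·pointⁱ" run into the Horner loop). The intended
// behaviour is plain Horner evaluation; a standalone sketch under that assumption,
// using only fr.Element:
func hornerEval(p []fr.Element, point fr.Element) fr.Element {
	var res fr.Element
	for i := len(p) - 1; i >= 0; i-- {
		res.Mul(&res, &point).Add(&res, &p[i]) // res = res·point + p[i]
	}
	return res
}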
-// fft.Domain Cardinality must be larger than p.Degree() -func Open(p []fr.Element, point fr.Element, srs *SRS) (OpeningProof, error) { - if len(p) == 0 || len(p) > len(srs.G1) { - return OpeningProof{}, ErrInvalidPolynomialSize - } - - // build the proof - res := OpeningProof{ - ClaimedValue: eval(p, point), - } - - // compute H - _p := make([]fr.Element, len(p)) - copy(_p, p) - h := dividePolyByXminusA(_p, res.ClaimedValue, point) - - _p = nil // h re-use this memory - - // commit to H - hCommit, err := Commit(h, srs) - if err != nil { - return OpeningProof{}, err - } - res.H.Set(&hCommit) - - return res, nil -} - -// Verify verifies a KZG opening proof at a single point -func Verify(commitment *Digest, proof *OpeningProof, point fr.Element, srs *SRS) error { - - // [f(a)]G₁ - var claimedValueG1Aff bn254.G1Jac - var claimedValueBigInt big.Int - proof.ClaimedValue.ToBigIntRegular(&claimedValueBigInt) - claimedValueG1Aff.ScalarMultiplicationAffine(&srs.G1[0], &claimedValueBigInt) - - // [f(α) - f(a)]G₁ - var fminusfaG1Jac bn254.G1Jac - fminusfaG1Jac.FromAffine(commitment) - fminusfaG1Jac.SubAssign(&claimedValueG1Aff) - - // [-H(α)]G₁ - var negH bn254.G1Affine - negH.Neg(&proof.H) - - // [α-a]G₂ - var alphaMinusaG2Jac, genG2Jac, alphaG2Jac bn254.G2Jac - var pointBigInt big.Int - point.ToBigIntRegular(&pointBigInt) - genG2Jac.FromAffine(&srs.G2[0]) - alphaG2Jac.FromAffine(&srs.G2[1]) - alphaMinusaG2Jac.ScalarMultiplication(&genG2Jac, &pointBigInt). - Neg(&alphaMinusaG2Jac). - AddAssign(&alphaG2Jac) - - // [α-a]G₂ - var xminusaG2Aff bn254.G2Affine - xminusaG2Aff.FromJacobian(&alphaMinusaG2Jac) - - // [f(α) - f(a)]G₁ - var fminusfaG1Aff bn254.G1Affine - fminusfaG1Aff.FromJacobian(&fminusfaG1Jac) - - // e([f(α) - f(a)]G₁, G₂).e([-H(α)]G₁, [α-a]G₂) ==? 1 - check, err := bn254.PairingCheck( - []bn254.G1Affine{fminusfaG1Aff, negH}, - []bn254.G2Affine{srs.G2[0], xminusaG2Aff}, - ) - if err != nil { - return err - } - if !check { - return ErrVerifyOpeningProof - } - return nil -} - -// BatchOpenSinglePoint creates a batch opening proof at point of a list of polynomials. -// It's an interactive protocol, made non interactive using Fiat Shamir. -// -// * point is the point at which the polynomials are opened. -// * digests is the list of committed polynomials to open, need to derive the challenge using Fiat Shamir. -// * polynomials is the list of polynomials to open, they are supposed to be of the same size. 
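// Editorial sketch: end-to-end use of the kzg API deleted above (NewSRS, Commit, Open,
// Verify). Assumes math/big and the bn254 fr package are imported; the SRS is built
// from a known alpha, which is only acceptable in tests.
func exampleKZGSingleOpening() error {
	srs, err := NewSRS(64, big.NewInt(42)) // test-only trusted setup
	if err != nil {
		return err
	}

	p := make([]fr.Element, 60) // polynomial in coefficient form
	for i := range p {
		p[i].SetRandom()
	}

	digest, err := Commit(p, srs) // [p(α)]G₁
	if err != nil {
		return err
	}

	var point fr.Element
	point.SetUint64(4321)
	proof, err := Open(p, point, srs) // quotient commitment + claimed p(point)
	if err != nil {
		return err
	}
	return Verify(&digest, &proof, point, srs) // single pairing check
}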
-func BatchOpenSinglePoint(polynomials [][]fr.Element, digests []Digest, point fr.Element, hf hash.Hash, srs *SRS) (BatchOpeningProof, error) { - - // check for invalid sizes - nbDigests := len(digests) - if nbDigests != len(polynomials) { - return BatchOpeningProof{}, ErrInvalidNbDigests - } - - // TODO ensure the polynomials are of the same size - largestPoly := -1 - for _, p := range polynomials { - if len(p) == 0 || len(p) > len(srs.G1) { - return BatchOpeningProof{}, ErrInvalidPolynomialSize - } - if len(p) > largestPoly { - largestPoly = len(p) - } - } - - var res BatchOpeningProof - - // compute the purported values - res.ClaimedValues = make([]fr.Element, len(polynomials)) - var wg sync.WaitGroup - wg.Add(len(polynomials)) - for i := 0; i < len(polynomials); i++ { - go func(_i int) { - res.ClaimedValues[_i] = eval(polynomials[_i], point) - wg.Done() - }(i) - } - - // derive the challenge γ, binded to the point and the commitments - gamma, err := deriveGamma(point, digests, hf) - if err != nil { - return BatchOpeningProof{}, err - } - - // ∑ᵢγⁱf(a) - var foldedEvaluations fr.Element - chSumGammai := make(chan struct{}, 1) - go func() { - // wait for polynomial evaluations to be completed (res.ClaimedValues) - wg.Wait() - foldedEvaluations = res.ClaimedValues[nbDigests-1] - for i := nbDigests - 2; i >= 0; i-- { - foldedEvaluations.Mul(&foldedEvaluations, &gamma). - Add(&foldedEvaluations, &res.ClaimedValues[i]) - } - close(chSumGammai) - }() - - // compute ∑ᵢγⁱfᵢ - // note: if we are willing to paralellize that, we could clone the poly and scale them by - // gamma n in parallel, before reducing into foldedPolynomials - foldedPolynomials := make([]fr.Element, largestPoly) - copy(foldedPolynomials, polynomials[0]) - acc := gamma - var pj fr.Element - for i := 1; i < len(polynomials); i++ { - for j := 0; j < len(polynomials[i]); j++ { - pj.Mul(&polynomials[i][j], &acc) - foldedPolynomials[j].Add(&foldedPolynomials[j], &pj) - } - acc.Mul(&acc, &gamma) - } - - // compute H - <-chSumGammai - h := dividePolyByXminusA(foldedPolynomials, foldedEvaluations, point) - foldedPolynomials = nil // same memory as h - - res.H, err = Commit(h, srs) - if err != nil { - return BatchOpeningProof{}, err - } - - return res, nil -} - -// FoldProof fold the digests and the proofs in batchOpeningProof using Fiat Shamir -// to obtain an opening proof at a single point. 
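// Editorial sketch: BatchOpenSinglePoint above folds the polynomials with powers of the
// Fiat-Shamir challenge γ — it opens ∑ᵢ γⁱ·fᵢ at the point, whose value is ∑ᵢ γⁱ·fᵢ(a).
// A direct, unoptimised computation of that folded value, assuming only fr.Element and
// the eval helper defined earlier in this file:
func foldedClaimedValue(polys [][]fr.Element, point, gamma fr.Element) fr.Element {
	var acc, res, term fr.Element
	acc.SetOne() // γ⁰
	for _, p := range polys {
		v := eval(p, point)   // fᵢ(a)
		term.Mul(&v, &acc)    // γⁱ·fᵢ(a)
		res.Add(&res, &term)
		acc.Mul(&acc, &gamma) // next power of γ
	}
	return res
}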
-// -// * digests list of digests on which batchOpeningProof is based -// * batchOpeningProof opening proof of digests -// * returns the folded version of batchOpeningProof, Digest, the folded version of digests -func FoldProof(digests []Digest, batchOpeningProof *BatchOpeningProof, point fr.Element, hf hash.Hash) (OpeningProof, Digest, error) { - - nbDigests := len(digests) - - // check consistancy between numbers of claims vs number of digests - if nbDigests != len(batchOpeningProof.ClaimedValues) { - return OpeningProof{}, Digest{}, ErrInvalidNbDigests - } - - // derive the challenge γ, binded to the point and the commitments - gamma, err := deriveGamma(point, digests, hf) - if err != nil { - return OpeningProof{}, Digest{}, ErrInvalidNbDigests - } - - // fold the claimed values and digests - // gammai = [1,γ,γ²,..,γⁿ⁻¹] - gammai := make([]fr.Element, nbDigests) - gammai[0].SetOne() - for i := 1; i < nbDigests; i++ { - gammai[i].Mul(&gammai[i-1], &gamma) - } - - foldedDigests, foldedEvaluations, err := fold(digests, batchOpeningProof.ClaimedValues, gammai) - if err != nil { - return OpeningProof{}, Digest{}, err - } - - // create the folded opening proof - var res OpeningProof - res.ClaimedValue.Set(&foldedEvaluations) - res.H.Set(&batchOpeningProof.H) - - return res, foldedDigests, nil -} - -// BatchVerifySinglePoint verifies a batched opening proof at a single point of a list of polynomials. -// -// * digests list of digests on which opening proof is done -// * batchOpeningProof proof of correct opening on the digests -func BatchVerifySinglePoint(digests []Digest, batchOpeningProof *BatchOpeningProof, point fr.Element, hf hash.Hash, srs *SRS) error { - - // fold the proof - foldedProof, foldedDigest, err := FoldProof(digests, batchOpeningProof, point, hf) - if err != nil { - return err - } - - // verify the foldedProof againts the foldedDigest - err = Verify(&foldedDigest, &foldedProof, point, srs) - return err - -} - -// BatchVerifyMultiPoints batch verifies a list of opening proofs at different points. -// The purpose of the batching is to have only one pairing for verifying several proofs. 
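// Editorial sketch: a typical round trip through the batch-at-one-point API above.
// Assumes crypto/sha256 for the Fiat-Shamir hash and an *SRS produced by NewSRS.
func exampleBatchAtSinglePoint(polys [][]fr.Element, point fr.Element, srs *SRS) error {
	digests := make([]Digest, len(polys))
	for i := range polys {
		var err error
		if digests[i], err = Commit(polys[i], srs); err != nil {
			return err
		}
	}

	hf := sha256.New() // hash used to derive γ
	batchProof, err := BatchOpenSinglePoint(polys, digests, point, hf, srs)
	if err != nil {
		return err
	}
	// Internally folds digests and claimed values with FoldProof, then calls Verify once.
	return BatchVerifySinglePoint(digests, &batchProof, point, hf, srs)
}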
-// -// * digests list of committed polynomials -// * proofs list of opening proofs, one for each digest -// * points the list of points at which the opening are done -func BatchVerifyMultiPoints(digests []Digest, proofs []OpeningProof, points []fr.Element, srs *SRS) error { - - // check consistancy nb proogs vs nb digests - if len(digests) != len(proofs) || len(digests) != len(points) { - return ErrInvalidNbDigests - } - - // if only one digest, call Verify - if len(digests) == 1 { - return Verify(&digests[0], &proofs[0], points[0], srs) - } - - // sample random numbers λᵢ for sampling - randomNumbers := make([]fr.Element, len(digests)) - randomNumbers[0].SetOne() - for i := 1; i < len(randomNumbers); i++ { - _, err := randomNumbers[i].SetRandom() - if err != nil { - return err - } - } - - // fold the committed quotients compute ∑ᵢλᵢ[Hᵢ(α)]G₁ - var foldedQuotients bn254.G1Affine - quotients := make([]bn254.G1Affine, len(proofs)) - for i := 0; i < len(randomNumbers); i++ { - quotients[i].Set(&proofs[i].H) - } - config := ecc.MultiExpConfig{} - _, err := foldedQuotients.MultiExp(quotients, randomNumbers, config) - if err != nil { - return nil - } - - // fold digests and evals - evals := make([]fr.Element, len(digests)) - for i := 0; i < len(randomNumbers); i++ { - evals[i].Set(&proofs[i].ClaimedValue) - } - - // fold the digests: ∑ᵢλᵢ[f_i(α)]G₁ - // fold the evals : ∑ᵢλᵢfᵢ(aᵢ) - foldedDigests, foldedEvals, err := fold(digests, evals, randomNumbers) - if err != nil { - return err - } - - // compute commitment to folded Eval [∑ᵢλᵢfᵢ(aᵢ)]G₁ - var foldedEvalsCommit bn254.G1Affine - var foldedEvalsBigInt big.Int - foldedEvals.ToBigIntRegular(&foldedEvalsBigInt) - foldedEvalsCommit.ScalarMultiplication(&srs.G1[0], &foldedEvalsBigInt) - - // compute foldedDigests = ∑ᵢλᵢ[fᵢ(α)]G₁ - [∑ᵢλᵢfᵢ(aᵢ)]G₁ - foldedDigests.Sub(&foldedDigests, &foldedEvalsCommit) - - // combien the points and the quotients using γᵢ - // ∑ᵢλᵢ[p_i]([Hᵢ(α)]G₁) - var foldedPointsQuotients bn254.G1Affine - for i := 0; i < len(randomNumbers); i++ { - randomNumbers[i].Mul(&randomNumbers[i], &points[i]) - } - _, err = foldedPointsQuotients.MultiExp(quotients, randomNumbers, config) - if err != nil { - return err - } - - // ∑ᵢλᵢ[f_i(α)]G₁ - [∑ᵢλᵢfᵢ(aᵢ)]G₁ + ∑ᵢλᵢ[p_i]([Hᵢ(α)]G₁) - // = [∑ᵢλᵢf_i(α) - ∑ᵢλᵢfᵢ(aᵢ) + ∑ᵢλᵢpᵢHᵢ(α)]G₁ - foldedDigests.Add(&foldedDigests, &foldedPointsQuotients) - - // -∑ᵢλᵢ[Qᵢ(α)]G₁ - foldedQuotients.Neg(&foldedQuotients) - - // pairing check - // e([∑ᵢλᵢ(fᵢ(α) - fᵢ(pᵢ) + pᵢHᵢ(α))]G₁, G₂).e([-∑ᵢλᵢ[Hᵢ(α)]G₁), [α]G₂) - check, err := bn254.PairingCheck( - []bn254.G1Affine{foldedDigests, foldedQuotients}, - []bn254.G2Affine{srs.G2[0], srs.G2[1]}, - ) - if err != nil { - return err - } - if !check { - return ErrVerifyOpeningProof - } - return nil - -} - -// fold folds digests and evaluations using the list of factors as random numbers. 
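// Editorial sketch: BatchVerifyMultiPoints above accepts exactly the proofs that would
// pass Verify one by one, but with a single pairing for the whole batch. A test-style
// cross-check under that assumption, using only the functions defined in this file:
func crossCheckMultiPointBatch(digests []Digest, proofs []OpeningProof, points []fr.Element, srs *SRS) error {
	for i := range proofs {
		if err := Verify(&digests[i], &proofs[i], points[i], srs); err != nil {
			return err // an individual check failed; the batch must fail too
		}
	}
	return BatchVerifyMultiPoints(digests, proofs, points, srs)
}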
-// -// * digests list of digests to fold -// * evaluations list of evaluations to fold -// * factors list of multiplicative factors used for the folding (in Montgomery form) -// -// * Returns ∑ᵢcᵢdᵢ, ∑ᵢcᵢf(aᵢ) -func fold(di []Digest, fai []fr.Element, ci []fr.Element) (Digest, fr.Element, error) { - - // length inconsistancy between digests and evaluations should have been done before calling this function - nbDigests := len(di) - - // fold the claimed values ∑ᵢcᵢf(aᵢ) - var foldedEvaluations, tmp fr.Element - for i := 0; i < nbDigests; i++ { - tmp.Mul(&fai[i], &ci[i]) - foldedEvaluations.Add(&foldedEvaluations, &tmp) - } - - // fold the digests ∑ᵢ[cᵢ]([fᵢ(α)]G₁) - var foldedDigests Digest - _, err := foldedDigests.MultiExp(di, ci, ecc.MultiExpConfig{}) - if err != nil { - return foldedDigests, foldedEvaluations, err - } - - // folding done - return foldedDigests, foldedEvaluations, nil - -} - -// deriveGamma derives a challenge using Fiat Shamir to fold proofs. -func deriveGamma(point fr.Element, digests []Digest, hf hash.Hash) (fr.Element, error) { - - // derive the challenge gamma, binded to the point and the commitments - fs := fiatshamir.NewTranscript(hf, "gamma") - if err := fs.Bind("gamma", point.Marshal()); err != nil { - return fr.Element{}, err - } - for i := 0; i < len(digests); i++ { - if err := fs.Bind("gamma", digests[i].Marshal()); err != nil { - return fr.Element{}, err - } - } - gammaByte, err := fs.ComputeChallenge("gamma") - if err != nil { - return fr.Element{}, err - } - var gamma fr.Element - gamma.SetBytes(gammaByte) - - return gamma, nil -} - -// dividePolyByXminusA computes (f-f(a))/(x-a), in canonical basis, in regular form -// f memory is re-used for the result -func dividePolyByXminusA(f []fr.Element, fa, a fr.Element) []fr.Element { - - // first we compute f-f(a) - f[0].Sub(&f[0], &fa) - - // now we use syntetic division to divide by x-a - var t fr.Element - for i := len(f) - 2; i >= 0; i-- { - t.Mul(&f[i+1], &a) - - f[i].Add(&f[i], &t) - } - - // the result is of degree deg(f)-1 - return f[1:] -} diff --git a/tools/gnark/bn254/fr/kzg/kzg_test.go b/tools/gnark/bn254/fr/kzg/kzg_test.go deleted file mode 100644 index 42ad4838..00000000 --- a/tools/gnark/bn254/fr/kzg/kzg_test.go +++ /dev/null @@ -1,481 +0,0 @@ -// Copyright 2020 ConsenSys Software Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
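// Editorial sketch: a concrete instance of the synthetic division performed by
// dividePolyByXminusA above. With f(X) = 2 + 3X + X² and a = 1 we have f(1) = 6 and
// (f(X) − 6)/(X − 1) = 4 + X. Assumes only fr.Element.
func exampleSyntheticDivision() bool {
	f := make([]fr.Element, 3)
	f[0].SetUint64(2)
	f[1].SetUint64(3)
	f[2].SetUint64(1)

	var a, fa fr.Element
	a.SetUint64(1)
	fa.SetUint64(6) // f(1)

	q := dividePolyByXminusA(f, fa, a) // reuses f's memory, degree drops by one

	var four, one fr.Element
	four.SetUint64(4)
	one.SetUint64(1)
	return len(q) == 2 && q[0].Equal(&four) && q[1].Equal(&one)
}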
- -// Code generated by consensys/gnark-crypto DO NOT EDIT - -package kzg - -import ( - "bytes" - "crypto/sha256" - "math/big" - "reflect" - "testing" - - "github.com/consensys/gnark-crypto/ecc" - "github.com/consensys/gnark-crypto/ecc/bn254" - "github.com/consensys/gnark-crypto/ecc/bn254/fr" -) - -// testSRS re-used accross tests of the KZG scheme -var testSRS *SRS - -func init() { - const srsSize = 230 - testSRS, _ = NewSRS(ecc.NextPowerOfTwo(srsSize), new(big.Int).SetInt64(42)) -} - -func TestDividePolyByXminusA(t *testing.T) { - - const pSize = 230 - - // build random polynomial - pol := make([]fr.Element, pSize) - pol[0].SetRandom() - for i := 1; i < pSize; i++ { - pol[i] = pol[i-1] - } - - // evaluate the polynomial at a random point - var point fr.Element - point.SetRandom() - evaluation := eval(pol, point) - - // probabilistic test (using Schwartz Zippel lemma, evaluation at one point is enough) - var randPoint, xminusa fr.Element - randPoint.SetRandom() - polRandpoint := eval(pol, randPoint) - polRandpoint.Sub(&polRandpoint, &evaluation) // f(rand)-f(point) - - // compute f-f(a)/x-a - h := dividePolyByXminusA(pol, evaluation, point) - pol = nil // h reuses this memory - - if len(h) != 229 { - t.Fatal("inconsistant size of quotient") - } - - hRandPoint := eval(h, randPoint) - xminusa.Sub(&randPoint, &point) // rand-point - - // f(rand)-f(point) ==? h(rand)*(rand-point) - hRandPoint.Mul(&hRandPoint, &xminusa) - - if !hRandPoint.Equal(&polRandpoint) { - t.Fatal("Error f-f(a)/x-a") - } -} - -func TestSerializationSRS(t *testing.T) { - - // create a SRS - srs, err := NewSRS(64, new(big.Int).SetInt64(42)) - if err != nil { - t.Fatal(err) - } - - // serialize it... - var buf bytes.Buffer - _, err = srs.WriteTo(&buf) - if err != nil { - t.Fatal(err) - } - - // reconstruct the SRS - var _srs SRS - _, err = _srs.ReadFrom(&buf) - if err != nil { - t.Fatal(err) - } - - // compare - if !reflect.DeepEqual(srs, &_srs) { - t.Fatal("scheme serialization failed") - } - -} - -func TestCommit(t *testing.T) { - - // create a polynomial - f := make([]fr.Element, 60) - for i := 0; i < 60; i++ { - f[i].SetRandom() - } - - // commit using the method from KZG - _kzgCommit, err := Commit(f, testSRS) - if err != nil { - t.Fatal(err) - } - var kzgCommit bn254.G1Affine - kzgCommit.Unmarshal(_kzgCommit.Marshal()) - - // check commitment using manual commit - var x fr.Element - x.SetString("42") - fx := eval(f, x) - var fxbi big.Int - fx.ToBigIntRegular(&fxbi) - var manualCommit bn254.G1Affine - manualCommit.Set(&testSRS.G1[0]) - manualCommit.ScalarMultiplication(&manualCommit, &fxbi) - - // compare both results - if !kzgCommit.Equal(&manualCommit) { - t.Fatal("error KZG commitment") - } - -} - -func TestVerifySinglePoint(t *testing.T) { - - // create a polynomial - f := randomPolynomial(60) - - // commit the polynomial - digest, err := Commit(f, testSRS) - if err != nil { - t.Fatal(err) - } - - // compute opening proof at a random point - var point fr.Element - point.SetString("4321") - proof, err := Open(f, point, testSRS) - if err != nil { - t.Fatal(err) - } - - // verify the claimed valued - expected := eval(f, point) - if !proof.ClaimedValue.Equal(&expected) { - t.Fatal("inconsistant claimed value") - } - - // verify correct proof - err = Verify(&digest, &proof, point, testSRS) - if err != nil { - t.Fatal(err) - } - - { - // verify wrong proof - proof.ClaimedValue.Double(&proof.ClaimedValue) - err = Verify(&digest, &proof, point, testSRS) - if err == nil { - t.Fatal("verifying wrong proof should have 
failed") - } - } - { - // verify wrong proof with quotient set to zero - // see https://cryptosubtlety.medium.com/00-8d4adcf4d255 - proof.H.X.SetZero() - proof.H.Y.SetZero() - err = Verify(&digest, &proof, point, testSRS) - if err == nil { - t.Fatal("verifying wrong proof should have failed") - } - } -} - -func TestBatchVerifySinglePoint(t *testing.T) { - - size := 40 - - // create polynomials - f := make([][]fr.Element, 10) - for i := 0; i < 10; i++ { - f[i] = randomPolynomial(size) - } - - // commit the polynomials - digests := make([]Digest, 10) - for i := 0; i < 10; i++ { - digests[i], _ = Commit(f[i], testSRS) - - } - - // pick a hash function - hf := sha256.New() - - // compute opening proof at a random point - var point fr.Element - point.SetString("4321") - proof, err := BatchOpenSinglePoint(f, digests, point, hf, testSRS) - if err != nil { - t.Fatal(err) - } - - // verify the claimed values - for i := 0; i < 10; i++ { - expectedClaim := eval(f[i], point) - if !expectedClaim.Equal(&proof.ClaimedValues[i]) { - t.Fatal("inconsistant claimed values") - } - } - - // verify correct proof - err = BatchVerifySinglePoint(digests, &proof, point, hf, testSRS) - if err != nil { - t.Fatal(err) - } - - { - // verify wrong proof - proof.ClaimedValues[0].Double(&proof.ClaimedValues[0]) - err = BatchVerifySinglePoint(digests, &proof, point, hf, testSRS) - if err == nil { - t.Fatal("verifying wrong proof should have failed") - } - } - { - // verify wrong proof with quotient set to zero - // see https://cryptosubtlety.medium.com/00-8d4adcf4d255 - proof.H.X.SetZero() - proof.H.Y.SetZero() - err = BatchVerifySinglePoint(digests, &proof, point, hf, testSRS) - if err == nil { - t.Fatal("verifying wrong proof should have failed") - } - } - -} - -func TestBatchVerifyMultiPoints(t *testing.T) { - - // create polynomials - f := make([][]fr.Element, 10) - for i := 0; i < 10; i++ { - f[i] = randomPolynomial(40) - } - - // commit the polynomials - digests := make([]Digest, 10) - for i := 0; i < 10; i++ { - digests[i], _ = Commit(f[i], testSRS) - } - - // pick a hash function - hf := sha256.New() - - // compute 2 batch opening proofs at 2 random points - points := make([]fr.Element, 2) - batchProofs := make([]BatchOpeningProof, 2) - points[0].SetRandom() - batchProofs[0], _ = BatchOpenSinglePoint(f[:5], digests[:5], points[0], hf, testSRS) - points[1].SetRandom() - batchProofs[1], _ = BatchOpenSinglePoint(f[5:], digests[5:], points[1], hf, testSRS) - - // fold the 2 batch opening proofs - proofs := make([]OpeningProof, 2) - foldedDigests := make([]Digest, 2) - proofs[0], foldedDigests[0], _ = FoldProof(digests[:5], &batchProofs[0], points[0], hf) - proofs[1], foldedDigests[1], _ = FoldProof(digests[5:], &batchProofs[1], points[1], hf) - - // check the the individual batch proofs are correct - err := Verify(&foldedDigests[0], &proofs[0], points[0], testSRS) - if err != nil { - t.Fatal(err) - } - err = Verify(&foldedDigests[1], &proofs[1], points[1], testSRS) - if err != nil { - t.Fatal(err) - } - - // batch verify correct folded proofs - err = BatchVerifyMultiPoints(foldedDigests, proofs, points, testSRS) - if err != nil { - t.Fatal(err) - } - - { - // batch verify tampered folded proofs - proofs[0].ClaimedValue.Double(&proofs[0].ClaimedValue) - - err = BatchVerifyMultiPoints(foldedDigests, proofs, points, testSRS) - if err == nil { - t.Fatal(err) - } - } - { - // batch verify tampered folded proofs with quotients set to infinity - // see https://cryptosubtlety.medium.com/00-8d4adcf4d255 - 
proofs[0].H.X.SetZero() - proofs[0].H.Y.SetZero() - proofs[1].H.X.SetZero() - proofs[1].H.Y.SetZero() - err = BatchVerifyMultiPoints(foldedDigests, proofs, points, testSRS) - if err == nil { - t.Fatal(err) - } - } - -} - -const benchSize = 1 << 16 - -func BenchmarkKZGCommit(b *testing.B) { - benchSRS, err := NewSRS(ecc.NextPowerOfTwo(benchSize), new(big.Int).SetInt64(42)) - if err != nil { - b.Fatal(err) - } - // random polynomial - p := randomPolynomial(benchSize / 2) - - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, _ = Commit(p, benchSRS) - } -} - -func BenchmarkDivideByXMinusA(b *testing.B) { - const pSize = 1 << 22 - - // build random polynomial - pol := make([]fr.Element, pSize) - pol[0].SetRandom() - for i := 1; i < pSize; i++ { - pol[i] = pol[i-1] - } - var a, fa fr.Element - a.SetRandom() - fa.SetRandom() - - b.ResetTimer() - for i := 0; i < b.N; i++ { - dividePolyByXminusA(pol, fa, a) - pol = pol[:pSize] - pol[pSize-1] = pol[0] - } -} - -func BenchmarkKZGOpen(b *testing.B) { - benchSRS, err := NewSRS(ecc.NextPowerOfTwo(benchSize), new(big.Int).SetInt64(42)) - if err != nil { - b.Fatal(err) - } - - // random polynomial - p := randomPolynomial(benchSize / 2) - var r fr.Element - r.SetRandom() - - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, _ = Open(p, r, benchSRS) - } -} - -func BenchmarkKZGVerify(b *testing.B) { - benchSRS, err := NewSRS(ecc.NextPowerOfTwo(benchSize), new(big.Int).SetInt64(42)) - if err != nil { - b.Fatal(err) - } - - // random polynomial - p := randomPolynomial(benchSize / 2) - var r fr.Element - r.SetRandom() - - // commit - comm, err := Commit(p, benchSRS) - if err != nil { - b.Fatal(err) - } - - // open - openingProof, err := Open(p, r, benchSRS) - if err != nil { - b.Fatal(err) - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - Verify(&comm, &openingProof, r, benchSRS) - } -} - -func BenchmarkKZGBatchOpen10(b *testing.B) { - benchSRS, err := NewSRS(ecc.NextPowerOfTwo(benchSize), new(big.Int).SetInt64(42)) - if err != nil { - b.Fatal(err) - } - - // 10 random polynomials - var ps [10][]fr.Element - for i := 0; i < 10; i++ { - ps[i] = randomPolynomial(benchSize / 2) - } - - // commitments - var commitments [10]Digest - for i := 0; i < 10; i++ { - commitments[i], _ = Commit(ps[i], benchSRS) - } - - // pick a hash function - hf := sha256.New() - - var r fr.Element - r.SetRandom() - - b.ResetTimer() - for i := 0; i < b.N; i++ { - BatchOpenSinglePoint(ps[:], commitments[:], r, hf, benchSRS) - } -} - -func BenchmarkKZGBatchVerify10(b *testing.B) { - benchSRS, err := NewSRS(ecc.NextPowerOfTwo(benchSize), new(big.Int).SetInt64(42)) - if err != nil { - b.Fatal(err) - } - - // 10 random polynomials - var ps [10][]fr.Element - for i := 0; i < 10; i++ { - ps[i] = randomPolynomial(benchSize / 2) - } - - // commitments - var commitments [10]Digest - for i := 0; i < 10; i++ { - commitments[i], _ = Commit(ps[i], benchSRS) - } - - // pick a hash function - hf := sha256.New() - - var r fr.Element - r.SetRandom() - - proof, err := BatchOpenSinglePoint(ps[:], commitments[:], r, hf, benchSRS) - if err != nil { - b.Fatal(err) - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - BatchVerifySinglePoint(commitments[:], &proof, r, hf, benchSRS) - } -} - -func randomPolynomial(size int) []fr.Element { - f := make([]fr.Element, size) - for i := 0; i < size; i++ { - f[i].SetRandom() - } - return f -} diff --git a/tools/gnark/bn254/fr/kzg/marshal.go b/tools/gnark/bn254/fr/kzg/marshal.go deleted file mode 100644 index 9b0bb9bf..00000000 --- a/tools/gnark/bn254/fr/kzg/marshal.go 
+++ /dev/null @@ -1,134 +0,0 @@ -// Copyright 2020 ConsenSys Software Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by consensys/gnark-crypto DO NOT EDIT - -package kzg - -import ( - "github.com/consensys/gnark-crypto/ecc/bn254" - "io" -) - -// WriteTo writes binary encoding of the SRS -func (srs *SRS) WriteTo(w io.Writer) (int64, error) { - // encode the SRS - enc := bn254.NewEncoder(w) - - toEncode := []interface{}{ - &srs.G2[0], - &srs.G2[1], - srs.G1, - } - - for _, v := range toEncode { - if err := enc.Encode(v); err != nil { - return enc.BytesWritten(), err - } - } - - return enc.BytesWritten(), nil -} - -// ReadFrom decodes SRS data from reader. -func (srs *SRS) ReadFrom(r io.Reader) (int64, error) { - // decode the SRS - dec := bn254.NewDecoder(r) - - toDecode := []interface{}{ - &srs.G2[0], - &srs.G2[1], - &srs.G1, - } - - for _, v := range toDecode { - if err := dec.Decode(v); err != nil { - return dec.BytesRead(), err - } - } - - return dec.BytesRead(), nil -} - -// WriteTo writes binary encoding of a OpeningProof -func (proof *OpeningProof) WriteTo(w io.Writer) (int64, error) { - enc := bn254.NewEncoder(w) - - toEncode := []interface{}{ - &proof.H, - &proof.ClaimedValue, - } - - for _, v := range toEncode { - if err := enc.Encode(v); err != nil { - return enc.BytesWritten(), err - } - } - - return enc.BytesWritten(), nil -} - -// ReadFrom decodes OpeningProof data from reader. -func (proof *OpeningProof) ReadFrom(r io.Reader) (int64, error) { - dec := bn254.NewDecoder(r) - - toDecode := []interface{}{ - &proof.H, - &proof.ClaimedValue, - } - - for _, v := range toDecode { - if err := dec.Decode(v); err != nil { - return dec.BytesRead(), err - } - } - - return dec.BytesRead(), nil -} - -// WriteTo writes binary encoding of a BatchOpeningProof -func (proof *BatchOpeningProof) WriteTo(w io.Writer) (int64, error) { - enc := bn254.NewEncoder(w) - - toEncode := []interface{}{ - &proof.H, - proof.ClaimedValues, - } - - for _, v := range toEncode { - if err := enc.Encode(v); err != nil { - return enc.BytesWritten(), err - } - } - - return enc.BytesWritten(), nil -} - -// ReadFrom decodes BatchOpeningProof data from reader. -func (proof *BatchOpeningProof) ReadFrom(r io.Reader) (int64, error) { - dec := bn254.NewDecoder(r) - - toDecode := []interface{}{ - &proof.H, - &proof.ClaimedValues, - } - - for _, v := range toDecode { - if err := dec.Decode(v); err != nil { - return dec.BytesRead(), err - } - } - - return dec.BytesRead(), nil -} diff --git a/tools/gnark/bn254/fr/mimc/decompose.go b/tools/gnark/bn254/fr/mimc/decompose.go deleted file mode 100644 index e61417b9..00000000 --- a/tools/gnark/bn254/fr/mimc/decompose.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2020 ConsenSys Software Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by consensys/gnark-crypto DO NOT EDIT - -package mimc - -import ( - "math/big" - - "github.com/consensys/gnark-crypto/ecc/bn254/fr" -) - -// Decompose interpret rawBytes as a bigInt x in big endian, -// and returns the digits of x (from LSB to MSB) when x is written -// in basis modulo. -func Decompose(rawBytes []byte) []fr.Element { - - rawBigInt := big.NewInt(0).SetBytes(rawBytes) - modulo := fr.Modulus() - - // maximum number of chunks that a function - maxNbChunks := len(rawBytes) / fr.Bytes - - res := make([]fr.Element, 0, maxNbChunks) - var tmp fr.Element - t := new(big.Int) - for rawBigInt.Sign() != 0 { - rawBigInt.DivMod(rawBigInt, modulo, t) - tmp.SetBigInt(t) - res = append(res, tmp) - } - - return res -} diff --git a/tools/gnark/bn254/fr/mimc/decompose_test.go b/tools/gnark/bn254/fr/mimc/decompose_test.go deleted file mode 100644 index 3597da7a..00000000 --- a/tools/gnark/bn254/fr/mimc/decompose_test.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2020 ConsenSys Software Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by consensys/gnark-crypto DO NOT EDIT - -package mimc - -import ( - "math/big" - "testing" - - "github.com/consensys/gnark-crypto/ecc/bn254/fr" -) - -func TestDecompose(t *testing.T) { - - // create 10 random digits in basis r - nbDigits := 10 - a := make([]fr.Element, nbDigits) - for i := 0; i < nbDigits; i++ { - a[i].SetRandom() - } - - // create a big int whose digits in basis r are a - m := fr.Modulus() - var b, tmp big.Int - for i := nbDigits - 1; i >= 0; i-- { - b.Mul(&b, m) - a[i].ToBigIntRegular(&tmp) - b.Add(&b, &tmp) - } - - // query the decomposition and compare to a - bb := b.Bytes() - d := Decompose(bb) - for i := 0; i < nbDigits; i++ { - if !d[i].Equal(&a[i]) { - t.Fatal("error decomposition") - } - } - -} diff --git a/tools/gnark/bn254/fr/mimc/doc.go b/tools/gnark/bn254/fr/mimc/doc.go deleted file mode 100644 index 497bd40a..00000000 --- a/tools/gnark/bn254/fr/mimc/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2020 ConsenSys Software Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
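// Editorial sketch: Decompose above writes the input integer in base r (the fr modulus),
// least-significant digit first, so the original value is recovered as ∑ᵢ dᵢ·rⁱ.
// Assumes math/big and the fr package; mirrors the reconstruction in the test below.
func recompose(digits []fr.Element) *big.Int {
	res := new(big.Int)
	r := fr.Modulus()
	var tmp big.Int
	for i := len(digits) - 1; i >= 0; i-- {
		res.Mul(res, r)                 // shift by one base-r digit
		digits[i].ToBigIntRegular(&tmp) // dᵢ as an integer
		res.Add(res, &tmp)
	}
	return res
}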
-// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by consensys/gnark-crypto DO NOT EDIT - -// Package mimc provides MiMC hash function using Miyaguchi–Preneel construction. -package mimc diff --git a/tools/gnark/bn254/fr/mimc/mimc.go b/tools/gnark/bn254/fr/mimc/mimc.go deleted file mode 100644 index 87a9776e..00000000 --- a/tools/gnark/bn254/fr/mimc/mimc.go +++ /dev/null @@ -1,184 +0,0 @@ -// Copyright 2020 ConsenSys Software Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by consensys/gnark-crypto DO NOT EDIT - -package mimc - -import ( - "errors" - "hash" - - "github.com/consensys/gnark-crypto/ecc/bn254/fr" - "golang.org/x/crypto/sha3" - "math/big" - "sync" -) - -const ( - mimcNbRounds = 91 - seed = "seed" // seed to derive the constants - BlockSize = fr.Bytes // BlockSize size that mimc consumes -) - -// Params constants for the mimc hash function -var ( - mimcConstants [mimcNbRounds]fr.Element - once sync.Once -) - -// digest represents the partial evaluation of the checksum -// along with the params of the mimc function -type digest struct { - h fr.Element - data []byte // data to hash -} - -// GetConstants exposed to be used in gnark -func GetConstants() []big.Int { - once.Do(initConstants) // init constants - res := make([]big.Int, mimcNbRounds) - for i := 0; i < mimcNbRounds; i++ { - mimcConstants[i].ToBigIntRegular(&res[i]) - } - return res -} - -// NewMiMC returns a MiMCImpl object, pure-go reference implementation -func NewMiMC() hash.Hash { - d := new(digest) - d.Reset() - return d -} - -// Reset resets the Hash to its initial state. -func (d *digest) Reset() { - d.data = nil - d.h = fr.Element{0, 0, 0, 0} -} - -// Sum appends the current hash to b and returns the resulting slice. -// It does not change the underlying hash state. -func (d *digest) Sum(b []byte) []byte { - buffer := d.checksum() - d.data = nil // flush the data already hashed - hash := buffer.Bytes() - b = append(b, hash[:]...) - return b -} - -// BlockSize returns the hash's underlying block size. -// The Write method must be able to accept any amount -// of data, but it may operate more efficiently if all writes -// are a multiple of the block size. -func (d *digest) Size() int { - return BlockSize -} - -// BlockSize returns the number of bytes Sum will return. -func (d *digest) BlockSize() int { - return BlockSize -} - -// Write (via the embedded io.Writer interface) adds more data to the running hash. -// -// Each []byte block of size BlockSize represents a big endian fr.Element. -// -// If len(p) is not a multiple of BlockSize and any of the []byte in p represent an integer -// larger than fr.Modulus, this function returns an error. -// -// To hash arbitrary data ([]byte not representing canonical field elements) use Decompose -// function in this package. 
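// Editorial sketch: hashing two field elements with the digest defined in this file.
// Each Write block must be the 32-byte big-endian encoding of a canonical fr.Element,
// which fr.Element.Bytes produces. Assumes only the fr package.
func exampleMiMCSum(a, b fr.Element) []byte {
	h := NewMiMC()
	ab, bb := a.Bytes(), b.Bytes() // [fr.Bytes]byte, big-endian, canonical
	_, _ = h.Write(ab[:])
	_, _ = h.Write(bb[:])
	return h.Sum(nil) // 32-byte MiMC digest
}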
-func (d *digest) Write(p []byte) (n int, err error) { - n = len(p) - if n%BlockSize != 0 { - return 0, errors.New("invalid input length: must represent a list of field elements, expects a []byte of len m*BlockSize") - } - - // ensure each block represents a field element in canonical reduced form - for i := 0; i < n; i += BlockSize { - if _, err = fr.BigEndian.Element((*[BlockSize]byte)(p[i : i+BlockSize])); err != nil { - return 0, err - } - } - - d.data = append(d.data, p...) - return -} - -// Hash hash using Miyaguchi-Preneel: -// https://en.wikipedia.org/wiki/One-way_compression_function -// The XOR operation is replaced by field addition, data is in Montgomery form -func (d *digest) checksum() fr.Element { - // Write guarantees len(data) % BlockSize == 0 - - // TODO @ThomasPiellard shouldn't Sum() returns an error if there is no data? - if len(d.data) == 0 { - d.data = make([]byte, BlockSize) - } - - for i := 0; i < len(d.data); i += BlockSize { - x, _ := fr.BigEndian.Element((*[BlockSize]byte)(d.data[i : i+BlockSize])) - r := d.encrypt(x) - d.h.Add(&r, &d.h).Add(&d.h, &x) - } - - return d.h -} - -// plain execution of a mimc run -// m: message -// k: encryption key -func (d *digest) encrypt(m fr.Element) fr.Element { - once.Do(initConstants) // init constants - - for i := 0; i < mimcNbRounds; i++ { - // m = (m+k+c)^5 - var tmp fr.Element - tmp.Add(&m, &d.h).Add(&tmp, &mimcConstants[i]) - m.Square(&tmp). - Square(&m). - Mul(&m, &tmp) - } - m.Add(&m, &d.h) - return m -} - -// Sum computes the mimc hash of msg from seed -func Sum(msg []byte) ([]byte, error) { - var d digest - if _, err := d.Write(msg); err != nil { - return nil, err - } - h := d.checksum() - bytes := h.Bytes() - return bytes[:], nil -} - -func initConstants() { - bseed := ([]byte)(seed) - - hash := sha3.NewLegacyKeccak256() - _, _ = hash.Write(bseed) - rnd := hash.Sum(nil) // pre hash before use - hash.Reset() - _, _ = hash.Write(rnd) - - for i := 0; i < mimcNbRounds; i++ { - rnd = hash.Sum(nil) - mimcConstants[i].SetBytes(rnd) - hash.Reset() - _, _ = hash.Write(rnd) - } -} diff --git a/tools/gnark/bn254/fr/pedersen/pedersen.go b/tools/gnark/bn254/fr/pedersen/pedersen.go deleted file mode 100644 index 47564470..00000000 --- a/tools/gnark/bn254/fr/pedersen/pedersen.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright 2020 ConsenSys Software Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by consensys/gnark-crypto DO NOT EDIT - -package pedersen - -import ( - "crypto/rand" - "fmt" - "github.com/consensys/gnark-crypto/ecc" - "github.com/consensys/gnark-crypto/ecc/bn254" - "github.com/consensys/gnark-crypto/ecc/bn254/fr" - "math/big" -) - -// Key for proof and verification -type Key struct { - g bn254.G2Affine // TODO @tabaie: does this really have to be randomized? - gRootSigmaNeg bn254.G2Affine //gRootSigmaNeg = g^{-1/σ} - basis []bn254.G1Affine - basisExpSigma []bn254.G1Affine -} - -func randomOnG2() (bn254.G2Affine, error) { // TODO: Add to G2.go? 
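// Editorial sketch: the checksum above is Miyaguchi–Preneel with field addition in place
// of XOR: for each block xᵢ, h ← Enc_h(xᵢ) + h + xᵢ, where Enc is the keyed x⁵ MiMC
// permutation implemented by encrypt. A direct transcription, assuming a hypothetical
// free function mimcEncrypt(m, key fr.Element) fr.Element with the same round function:
func miyaguchiPreneel(blocks []fr.Element) fr.Element {
	var h fr.Element // IV = 0
	for _, x := range blocks {
		e := mimcEncrypt(x, h)    // Enc_h(xᵢ), hypothetical helper
		h.Add(&e, &h).Add(&h, &x) // h = Enc_h(xᵢ) + h + xᵢ
	}
	return h
}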
- gBytes := make([]byte, fr.Bytes) - if _, err := rand.Read(gBytes); err != nil { - return bn254.G2Affine{}, err - } - return bn254.HashToG2(gBytes, []byte("random on g2")) -} - -func Setup(basis []bn254.G1Affine) (Key, error) { - var ( - k Key - err error - ) - - if k.g, err = randomOnG2(); err != nil { - return k, err - } - - var modMinusOne big.Int - modMinusOne.Sub(fr.Modulus(), big.NewInt(1)) - var sigma *big.Int - if sigma, err = rand.Int(rand.Reader, &modMinusOne); err != nil { - return k, err - } - sigma.Add(sigma, big.NewInt(1)) - - var sigmaInvNeg big.Int - sigmaInvNeg.ModInverse(sigma, fr.Modulus()) - sigmaInvNeg.Sub(fr.Modulus(), &sigmaInvNeg) - k.gRootSigmaNeg.ScalarMultiplication(&k.g, &sigmaInvNeg) - - k.basisExpSigma = make([]bn254.G1Affine, len(basis)) - for i := range basis { - k.basisExpSigma[i].ScalarMultiplication(&basis[i], sigma) - } - - k.basis = basis - return k, err -} - -func (k *Key) Commit(values []fr.Element) (commitment bn254.G1Affine, knowledgeProof bn254.G1Affine, err error) { - - if len(values) != len(k.basis) { - err = fmt.Errorf("unexpected number of values") - return - } - - // TODO @gbotrel this will spawn more than one task, see - // https://github.com/ConsenSys/gnark-crypto/issues/269 - config := ecc.MultiExpConfig{ - NbTasks: 1, // TODO Experiment - } - - if _, err = commitment.MultiExp(k.basis, values, config); err != nil { - return - } - - _, err = knowledgeProof.MultiExp(k.basisExpSigma, values, config) - - return -} - -// VerifyKnowledgeProof checks if the proof of knowledge is valid -func (k *Key) VerifyKnowledgeProof(commitment bn254.G1Affine, knowledgeProof bn254.G1Affine) error { - - if !commitment.IsInSubGroup() || !knowledgeProof.IsInSubGroup() { - return fmt.Errorf("subgroup check failed") - } - - product, err := bn254.Pair([]bn254.G1Affine{commitment, knowledgeProof}, []bn254.G2Affine{k.g, k.gRootSigmaNeg}) - if err != nil { - return err - } - if product.IsOne() { - return nil - } - return fmt.Errorf("proof rejected") -} diff --git a/tools/gnark/bn254/fr/pedersen/pedersen_test.go b/tools/gnark/bn254/fr/pedersen/pedersen_test.go deleted file mode 100644 index eac9cebe..00000000 --- a/tools/gnark/bn254/fr/pedersen/pedersen_test.go +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright 2020 ConsenSys Software Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
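Before the tests, a worked sketch (added here, in additive notation for G1 and using the Key field names above, with C the commitment and π the knowledge proof) of why the pairing check in VerifyKnowledgeProof suffices:

```go
// C = Σᵢ vᵢ·basisᵢ                       (Commit's first MultiExp)
// π = Σᵢ vᵢ·(σ·basisᵢ) = σ·C             (Commit's second MultiExp, over basisExpSigma)
// e(C, g) · e(π, gRootSigmaNeg) = e(C, g) · e(σ·C, g^(-1/σ))
//                               = e(C, g) · e(C, g)^(-1) = 1
// so the pairing product is 1 exactly when π was built from the same values vᵢ as C.
```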
- -// Code generated by consensys/gnark-crypto DO NOT EDIT - -package pedersen - -import ( - "github.com/consensys/gnark-crypto/ecc/bn254" - "github.com/consensys/gnark-crypto/ecc/bn254/fr" - "github.com/stretchr/testify/assert" - "math/rand" - "testing" -) - -func interfaceSliceToFrSlice(t *testing.T, values ...interface{}) []fr.Element { - res := make([]fr.Element, len(values)) - for i, v := range values { - _, err := res[i].SetInterface(v) - assert.NoError(t, err) - } - return res -} - -func randomFrSlice(t *testing.T, size int) []interface{} { - res := make([]interface{}, size) - var err error - for i := range res { - var v fr.Element - res[i], err = v.SetRandom() - assert.NoError(t, err) - } - return res -} - -func randomOnG1() (bn254.G1Affine, error) { // TODO: Add to G1.go? - gBytes := make([]byte, fr.Bytes) - if _, err := rand.Read(gBytes); err != nil { - return bn254.G1Affine{}, err - } - return bn254.HashToG1(gBytes, []byte("random on g2")) -} - -func testCommit(t *testing.T, values ...interface{}) { - - basis := make([]bn254.G1Affine, len(values)) - for i := range basis { - var err error - basis[i], err = randomOnG1() - assert.NoError(t, err) - } - - var ( - key Key - err error - commitment, pok bn254.G1Affine - ) - - key, err = Setup(basis) - assert.NoError(t, err) - commitment, pok, err = key.Commit(interfaceSliceToFrSlice(t, values...)) - assert.NoError(t, err) - assert.NoError(t, key.VerifyKnowledgeProof(commitment, pok)) - - pok.Neg(&pok) - assert.NotNil(t, key.VerifyKnowledgeProof(commitment, pok)) -} - -func TestCommitToOne(t *testing.T) { - testCommit(t, 1) -} - -func TestCommitSingle(t *testing.T) { - testCommit(t, randomFrSlice(t, 1)...) -} - -func TestCommitFiveElements(t *testing.T) { - testCommit(t, randomFrSlice(t, 5)...) -} diff --git a/tools/gnark/bn254/fr/permutation/doc.go b/tools/gnark/bn254/fr/permutation/doc.go deleted file mode 100644 index bdf98e6c..00000000 --- a/tools/gnark/bn254/fr/permutation/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2020 ConsenSys Software Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by consensys/gnark-crypto DO NOT EDIT - -// Package permutation provides an API to build permutation proofs. -package permutation diff --git a/tools/gnark/bn254/fr/permutation/permutation.go b/tools/gnark/bn254/fr/permutation/permutation.go deleted file mode 100644 index 0777420c..00000000 --- a/tools/gnark/bn254/fr/permutation/permutation.go +++ /dev/null @@ -1,379 +0,0 @@ -// Copyright 2020 ConsenSys Software Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Code generated by consensys/gnark-crypto DO NOT EDIT
-
-package permutation
-
-import (
- "crypto/sha256"
- "errors"
- "math/big"
- "math/bits"
-
- "github.com/consensys/gnark-crypto/ecc/bn254"
- "github.com/consensys/gnark-crypto/ecc/bn254/fr"
- "github.com/consensys/gnark-crypto/ecc/bn254/fr/fft"
- "github.com/consensys/gnark-crypto/ecc/bn254/fr/kzg"
- fiatshamir "github.com/consensys/gnark-crypto/fiat-shamir"
-)
-
-var (
- ErrIncompatibleSize = errors.New("t1 and t2 should be of the same size")
- ErrSize = errors.New("t1 and t2 should be of size a power of 2")
- ErrPermutationProof = errors.New("permutation proof verification failed")
- ErrGenerator = errors.New("wrong generator")
-)
-
-// Proof proof that the commitments of t1 and t2 come from
-// the same vector but permuted.
-type Proof struct {
-
- // size of the polynomials
- size int
-
- // generator of the fft domain, used for shifting the evaluation point
- g fr.Element
-
- // commitments of t1 & t2, the permuted vectors, and z, the accumulation
- // polynomial
- t1, t2, z kzg.Digest
-
- // commitment to the quotient polynomial
- q kzg.Digest
-
- // opening proofs of t1, t2, z, q (in that order)
- batchedProof kzg.BatchOpeningProof
-
- // shifted opening proof of z
- shiftedProof kzg.OpeningProof
-}
-
-// evaluateAccumulationPolynomialBitReversed returns the accumulation polynomial in Lagrange basis.
-func evaluateAccumulationPolynomialBitReversed(lt1, lt2 []fr.Element, epsilon fr.Element) []fr.Element {
-
- s := len(lt1)
- z := make([]fr.Element, s)
- d := make([]fr.Element, s)
- z[0].SetOne()
- d[0].SetOne()
- nn := uint64(64 - bits.TrailingZeros64(uint64(s)))
- var t fr.Element
- for i := 0; i < s-1; i++ {
- _i := int(bits.Reverse64(uint64(i)) >> nn)
- _ii := int(bits.Reverse64(uint64((i+1)%s)) >> nn)
- z[_ii].Mul(&z[_i], t.Sub(&epsilon, &lt1[i]))
- d[i+1].Mul(&d[i], t.Sub(&epsilon, &lt2[i]))
- }
- d = fr.BatchInvert(d)
- for i := 0; i < s-1; i++ {
- _ii := int(bits.Reverse64(uint64((i+1)%s)) >> nn)
- z[_ii].Mul(&z[_ii], &d[i+1])
- }
-
- return z
-}
-
-// evaluateFirstPartNumReverse computes lt2*z(gx) - lt1*z
-func evaluateFirstPartNumReverse(lt1, lt2, lz []fr.Element, epsilon fr.Element) []fr.Element {
-
- s := len(lt1)
- res := make([]fr.Element, s)
- var a, b fr.Element
- nn := uint64(64 - bits.TrailingZeros64(uint64(s)))
- for i := 0; i < s; i++ {
- _i := int(bits.Reverse64(uint64(i)) >> nn)
- _ii := int(bits.Reverse64(uint64((i+1)%s)) >> nn)
- a.Sub(&epsilon, &lt2[_i])
- a.Mul(&lz[_ii], &a)
- b.Sub(&epsilon, &lt1[_i])
- b.Mul(&lz[_i], &b)
- res[_i].Sub(&a, &b)
- }
- return res
-}
-
-// evaluateSecondPartNumReverse computes L0 * (z-1)
-func evaluateSecondPartNumReverse(lz []fr.Element, d *fft.Domain) []fr.Element {
-
- var tn, o, g fr.Element
- o.SetOne()
- tn.Exp(d.FrMultiplicativeGen, big.NewInt(int64(d.Cardinality))).
- Sub(&tn, &o)
- s := len(lz)
- u := make([]fr.Element, s)
- g.Set(&d.FrMultiplicativeGen)
- for i := 0; i < s; i++ {
- u[i].Sub(&g, &o)
- g.Mul(&g, &d.Generator)
- }
- u = fr.BatchInvert(u)
- res := make([]fr.Element, s)
- nn := uint64(64 - bits.TrailingZeros64(uint64(s)))
- for i := 0; i < s; i++ {
- _i := int(bits.Reverse64(uint64(i)) >> nn)
- res[_i].Sub(&lz[_i], &o).
- Mul(&res[_i], &u[i]).
- Mul(&res[_i], &tn)
- }
- return res
-}
-
-// Prove generates a proof that t1 and t2 are the same but permuted.
-// The size of t1 and t2 should be the same and a power of 2.
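For readability, the recurrence computed by evaluateAccumulationPolynomialBitReversed above, restated in plain index order with an explicit division; accumulationPlain is a hypothetical reference helper, not part of the file:

```go
// z[0] = 1 and z[i+1] = z[i]·(ε − lt1[i])/(ε − lt2[i]); when lt2 is a permutation of
// lt1 the running product closes back to 1, which is what the quotient and the
// L0·(z−1) term enforce. The bit-reversed variant above computes the same values,
// only stored in bit-reversed order and with a single batched inversion.
func accumulationPlain(lt1, lt2 []fr.Element, epsilon fr.Element) []fr.Element {
	z := make([]fr.Element, len(lt1))
	z[0].SetOne()
	var num, den fr.Element
	for i := 0; i < len(lt1)-1; i++ {
		num.Sub(&epsilon, &lt1[i])
		den.Sub(&epsilon, &lt2[i])
		den.Inverse(&den)
		z[i+1].Mul(&z[i], &num)
		z[i+1].Mul(&z[i+1], &den)
	}
	return z
}
```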
-func Prove(srs *kzg.SRS, t1, t2 []fr.Element) (Proof, error) { - - // res - var proof Proof - var err error - - // size checking - if len(t1) != len(t2) { - return proof, ErrIncompatibleSize - } - - // create the domains - d := fft.NewDomain(uint64(len(t1))) - if d.Cardinality != uint64(len(t1)) { - return proof, ErrSize - } - s := int(d.Cardinality) - proof.size = s - proof.g.Set(&d.Generator) - - // hash function for Fiat Shamir - hFunc := sha256.New() - - // transcript to derive the challenge - fs := fiatshamir.NewTranscript(hFunc, "epsilon", "omega", "eta") - - // commit t1, t2 - ct1 := make([]fr.Element, s) - ct2 := make([]fr.Element, s) - copy(ct1, t1) - copy(ct2, t2) - d.FFTInverse(ct1, fft.DIF) - d.FFTInverse(ct2, fft.DIF) - fft.BitReverse(ct1) - fft.BitReverse(ct2) - proof.t1, err = kzg.Commit(ct1, srs) - if err != nil { - return proof, err - } - proof.t2, err = kzg.Commit(ct2, srs) - if err != nil { - return proof, err - } - - // derive challenge for z - epsilon, err := deriveRandomness(&fs, "epsilon", &proof.t1, &proof.t2) - if err != nil { - return proof, err - } - - // compute Z and commit it - cz := evaluateAccumulationPolynomialBitReversed(t1, t2, epsilon) - d.FFTInverse(cz, fft.DIT) - proof.z, err = kzg.Commit(cz, srs) - if err != nil { - return proof, err - } - lz := make([]fr.Element, s) - copy(lz, cz) - d.FFT(lz, fft.DIF, true) - - // compute the first part of the numerator - lt1 := make([]fr.Element, s) - lt2 := make([]fr.Element, s) - copy(lt1, ct1) - copy(lt2, ct2) - d.FFT(lt1, fft.DIF, true) - d.FFT(lt2, fft.DIF, true) - lsNumFirstPart := evaluateFirstPartNumReverse(lt1, lt2, lz, epsilon) - - // compute second part of the numerator - lsNum := evaluateSecondPartNumReverse(lz, d) - - // derive challenge used for the folding - omega, err := deriveRandomness(&fs, "omega", &proof.z) - if err != nil { - return proof, err - } - - // fold the numerator and divide it by x^n-1 - var t, one fr.Element - one.SetOne() - t.Exp(d.FrMultiplicativeGen, big.NewInt(int64(d.Cardinality))).Sub(&t, &one).Inverse(&t) - for i := 0; i < s; i++ { - lsNum[i].Mul(&omega, &lsNum[i]). - Add(&lsNum[i], &lsNumFirstPart[i]). - Mul(&lsNum[i], &t) - } - - // get the quotient and commit it - d.FFTInverse(lsNum, fft.DIT, true) - proof.q, err = kzg.Commit(lsNum, srs) - if err != nil { - return proof, err - } - - // derive the evaluation challenge - eta, err := deriveRandomness(&fs, "eta", &proof.q) - if err != nil { - return proof, err - } - - // compute the opening proofs - proof.batchedProof, err = kzg.BatchOpenSinglePoint( - [][]fr.Element{ - ct1, - ct2, - cz, - lsNum, - }, - []kzg.Digest{ - proof.t1, - proof.t2, - proof.z, - proof.q, - }, - eta, - hFunc, - srs, - ) - if err != nil { - return proof, err - } - - var shiftedEta fr.Element - shiftedEta.Mul(&eta, &d.Generator) - proof.shiftedProof, err = kzg.Open( - cz, - shiftedEta, - srs, - ) - if err != nil { - return proof, err - } - - // done - return proof, nil - -} - -// Verify verifies a permutation proof. 
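The polynomial identity that Verify below checks at the Fiat–Shamir point η (a restatement of the relation-check code, with claimed values ordered t1, t2, z, q as in the batched opening, ε and ω the first two challenges, and L₀ = (Xⁿ−1)/(X−1)):

```go
// (ε − t2(η))·z(gη) − (ε − t1(η))·z(η) + ω·L₀(η)·(z(η) − 1) = (ηⁿ − 1)·q(η)
// i.e. the accumulation constraint plus the "z starts at 1" boundary term,
// both divided by Xⁿ − 1 and folded with ω into the quotient q.
```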
-func Verify(srs *kzg.SRS, proof Proof) error { - - // hash function that is used for Fiat Shamir - hFunc := sha256.New() - - // transcript to derive the challenge - fs := fiatshamir.NewTranscript(hFunc, "epsilon", "omega", "eta") - - // derive the challenges - epsilon, err := deriveRandomness(&fs, "epsilon", &proof.t1, &proof.t2) - if err != nil { - return err - } - - omega, err := deriveRandomness(&fs, "omega", &proof.z) - if err != nil { - return err - } - - eta, err := deriveRandomness(&fs, "eta", &proof.q) - if err != nil { - return err - } - - // check the relation - bs := big.NewInt(int64(proof.size)) - var l0, a, b, one, rhs, lhs fr.Element - one.SetOne() - rhs.Exp(eta, bs). - Sub(&rhs, &one) - a.Sub(&eta, &one) - l0.Div(&rhs, &a) - rhs.Mul(&rhs, &proof.batchedProof.ClaimedValues[3]) - a.Sub(&epsilon, &proof.batchedProof.ClaimedValues[1]). - Mul(&a, &proof.shiftedProof.ClaimedValue) - b.Sub(&epsilon, &proof.batchedProof.ClaimedValues[0]). - Mul(&b, &proof.batchedProof.ClaimedValues[2]) - lhs.Sub(&a, &b) - a.Sub(&proof.batchedProof.ClaimedValues[2], &one). - Mul(&a, &l0). - Mul(&a, &omega) - lhs.Add(&a, &lhs) - if !lhs.Equal(&rhs) { - return ErrPermutationProof - } - - // check the opening proofs - err = kzg.BatchVerifySinglePoint( - []kzg.Digest{ - proof.t1, - proof.t2, - proof.z, - proof.q, - }, - &proof.batchedProof, - eta, - hFunc, - srs, - ) - if err != nil { - return err - } - - var shiftedEta fr.Element - shiftedEta.Mul(&eta, &proof.g) - err = kzg.Verify(&proof.z, &proof.shiftedProof, shiftedEta, srs) - if err != nil { - return err - } - - // check the generator is correct - var checkOrder fr.Element - checkOrder.Exp(proof.g, big.NewInt(int64(proof.size/2))) - if checkOrder.Equal(&one) { - return ErrGenerator - } - checkOrder.Square(&checkOrder) - if !checkOrder.Equal(&one) { - return ErrGenerator - } - - return nil -} - -// TODO put that in fiat-shamir package -func deriveRandomness(fs *fiatshamir.Transcript, challenge string, points ...*bn254.G1Affine) (fr.Element, error) { - - var buf [bn254.SizeOfG1AffineUncompressed]byte - var r fr.Element - - for _, p := range points { - buf = p.RawBytes() - if err := fs.Bind(challenge, buf[:]); err != nil { - return r, err - } - } - - b, err := fs.ComputeChallenge(challenge) - if err != nil { - return r, err - } - r.SetBytes(b) - return r, nil -} diff --git a/tools/gnark/bn254/fr/permutation/permutation_test.go b/tools/gnark/bn254/fr/permutation/permutation_test.go deleted file mode 100644 index 00699fa6..00000000 --- a/tools/gnark/bn254/fr/permutation/permutation_test.go +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright 2020 ConsenSys Software Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -// Code generated by consensys/gnark-crypto DO NOT EDIT - -package permutation - -import ( - "math/big" - "testing" - - "github.com/consensys/gnark-crypto/ecc/bn254/fr" - "github.com/consensys/gnark-crypto/ecc/bn254/fr/kzg" -) - -func TestProof(t *testing.T) { - - srs, err := kzg.NewSRS(64, big.NewInt(13)) - if err != nil { - t.Fatal(err) - } - - a := make([]fr.Element, 8) - b := make([]fr.Element, 8) - - for i := 0; i < 8; i++ { - a[i].SetUint64(uint64(4*i + 1)) - } - for i := 0; i < 8; i++ { - b[i].Set(&a[(5*i)%8]) - } - - // correct proof - { - proof, err := Prove(srs, a, b) - if err != nil { - t.Fatal(err) - } - - err = Verify(srs, proof) - if err != nil { - t.Fatal(err) - } - } - - // wrong proof - { - a[0].SetRandom() - proof, err := Prove(srs, a, b) - if err != nil { - t.Fatal(err) - } - - err = Verify(srs, proof) - if err == nil { - t.Fatal(err) - } - } - -} - -func BenchmarkProver(b *testing.B) { - - srsSize := 1 << 15 - polySize := 1 << 14 - - srs, _ := kzg.NewSRS(uint64(srsSize), big.NewInt(13)) - a := make([]fr.Element, polySize) - c := make([]fr.Element, polySize) - - for i := 0; i < polySize; i++ { - a[i].SetUint64(uint64(i)) - } - for i := 0; i < polySize; i++ { - c[i].Set(&a[(5*i)%(polySize)]) - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - Prove(srs, a, c) - } - -} diff --git a/tools/gnark/bn254/fr/plookup/doc.go b/tools/gnark/bn254/fr/plookup/doc.go deleted file mode 100644 index ec4b9128..00000000 --- a/tools/gnark/bn254/fr/plookup/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2020 ConsenSys Software Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by consensys/gnark-crypto DO NOT EDIT - -// Package plookup provides an API to build plookup proofs. -package plookup diff --git a/tools/gnark/bn254/fr/plookup/plookup_test.go b/tools/gnark/bn254/fr/plookup/plookup_test.go deleted file mode 100644 index 46a4e3a4..00000000 --- a/tools/gnark/bn254/fr/plookup/plookup_test.go +++ /dev/null @@ -1,139 +0,0 @@ -// Copyright 2020 ConsenSys Software Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
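The tests below exercise both the vector and the table variants; for orientation, an informal summary (following https://eprint.iacr.org/2020/315, which vector.go implements) of the statement being proven:

```go
// A plookup proof shows that every entry of a witness vector f appears in a public
// lookup table t. The prover sorts the concatenation (f, t) by t, splits the result
// into two overlapping halves h1 and h2, and reduces membership to a grand-product
// identity over random challenges β and γ, enforced through the accumulation
// polynomial z and a quotient polynomial h, all committed and opened with KZG.
```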
- -// Code generated by consensys/gnark-crypto DO NOT EDIT - -package plookup - -import ( - "math/big" - "testing" - - "github.com/consensys/gnark-crypto/ecc/bn254/fr/kzg" -) - -func TestLookupVector(t *testing.T) { - - lookupVector := make(Table, 8) - fvector := make(Table, 7) - for i := 0; i < 8; i++ { - lookupVector[i].SetUint64(uint64(2 * i)) - } - for i := 0; i < 7; i++ { - fvector[i].Set(&lookupVector[(4*i+1)%8]) - } - - srs, err := kzg.NewSRS(64, big.NewInt(13)) - if err != nil { - t.Fatal(err) - } - - // correct proof vector - { - proof, err := ProveLookupVector(srs, fvector, lookupVector) - if err != nil { - t.Fatal(err) - } - - err = VerifyLookupVector(srs, proof) - if err != nil { - t.Fatal(err) - } - } - - // wrong proofs vector - { - fvector[0].SetRandom() - - proof, err := ProveLookupVector(srs, fvector, lookupVector) - if err != nil { - t.Fatal(err) - } - - err = VerifyLookupVector(srs, proof) - if err == nil { - t.Fatal(err) - } - } - -} - -func TestLookupTable(t *testing.T) { - - srs, err := kzg.NewSRS(64, big.NewInt(13)) - if err != nil { - t.Fatal(err) - } - - lookupTable := make([]Table, 3) - fTable := make([]Table, 3) - for i := 0; i < 3; i++ { - lookupTable[i] = make(Table, 8) - fTable[i] = make(Table, 7) - for j := 0; j < 8; j++ { - lookupTable[i][j].SetUint64(uint64(2*i + j)) - } - for j := 0; j < 7; j++ { - fTable[i][j].Set(&lookupTable[i][(4*j+1)%8]) - } - } - - // correct proof - { - proof, err := ProveLookupTables(srs, fTable, lookupTable) - if err != nil { - t.Fatal(err) - } - - err = VerifyLookupTables(srs, proof) - if err != nil { - t.Fatal(err) - } - } - - // wrong proof - { - fTable[0][0].SetRandom() - proof, err := ProveLookupTables(srs, fTable, lookupTable) - if err != nil { - t.Fatal(err) - } - - err = VerifyLookupTables(srs, proof) - if err == nil { - t.Fatal(err) - } - } - -} - -func BenchmarkPlookup(b *testing.B) { - - srsSize := 1 << 15 - polySize := 1 << 14 - - srs, _ := kzg.NewSRS(uint64(srsSize), big.NewInt(13)) - a := make(Table, polySize) - c := make(Table, polySize) - - for i := 0; i < 1<<14; i++ { - a[i].SetUint64(uint64(i)) - c[i].SetUint64(uint64((8 * i) % polySize)) - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - ProveLookupVector(srs, a, c) - } -} diff --git a/tools/gnark/bn254/fr/plookup/table.go b/tools/gnark/bn254/fr/plookup/table.go deleted file mode 100644 index bb2d7d6b..00000000 --- a/tools/gnark/bn254/fr/plookup/table.go +++ /dev/null @@ -1,252 +0,0 @@ -// Copyright 2020 ConsenSys Software Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
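ProveLookupTables below handles multi-row tables by folding the rows of f and t with powers of a Fiat–Shamir challenge λ before calling the single-vector prover; schematically, matching its Horner loop (an added note, not from the original file):

```go
// foldedf[i] = Σⱼ λʲ·f[j][i]    foldedt[i] = Σⱼ λʲ·t[j][i]
// With overwhelming probability over λ, "every column f[:][i] equals some column
// t[:][j]" reduces to the single-vector claim "every foldedf[i] appears in foldedt",
// which ProveLookupVector handles. Since ProveLookupVector commits to the *sorted*
// table, the prover additionally produces a permutation proof between foldedt and
// its sorted copy.
```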
- -// Code generated by consensys/gnark-crypto DO NOT EDIT - -package plookup - -import ( - "crypto/sha256" - "errors" - "math/big" - "sort" - - bn254 "github.com/consensys/gnark-crypto/ecc/bn254" - "github.com/consensys/gnark-crypto/ecc/bn254/fr" - "github.com/consensys/gnark-crypto/ecc/bn254/fr/fft" - "github.com/consensys/gnark-crypto/ecc/bn254/fr/kzg" - "github.com/consensys/gnark-crypto/ecc/bn254/fr/permutation" - fiatshamir "github.com/consensys/gnark-crypto/fiat-shamir" -) - -var ( - ErrIncompatibleSize = errors.New("the tables in f and t are not of the same size") - ErrFoldedCommitment = errors.New("the folded commitment is malformed") - ErrNumberDigests = errors.New("proof.ts and proof.fs are not of the same length") -) - -// ProofLookupTables proofs that a list of tables -type ProofLookupTables struct { - - // commitments to the rows f - fs []kzg.Digest - - // commitments to the rows of t - ts []kzg.Digest - - // lookup proof for the f and t folded - foldedProof ProofLookupVector - - // proof that the ts folded correspond to t in the folded proof - permutationProof permutation.Proof -} - -// ProveLookupTables generates a proof that f, seen as a multi dimensional table, -// consists of vectors that are in t. In other words for each i, f[:][i] must be one -// of the t[:][j]. -// -// For instance, if t is the truth table of the XOR function, t will be populated such -// that t[:][i] contains the i-th entry of the truth table, so t[0][i] XOR t[1][i] = t[2][i]. -// -// The Table in f and t are supposed to be of the same size constant size. -func ProveLookupTables(srs *kzg.SRS, f, t []Table) (ProofLookupTables, error) { - - // res - proof := ProofLookupTables{} - var err error - - // hash function used for Fiat Shamir - hFunc := sha256.New() - - // transcript to derive the challenge - fs := fiatshamir.NewTranscript(hFunc, "lambda") - - // check the sizes - if len(f) != len(t) { - return proof, ErrIncompatibleSize - } - s := len(f[0]) - for i := 1; i < len(f); i++ { - if len(f[i]) != s { - return proof, ErrIncompatibleSize - } - } - s = len(t[0]) - for i := 1; i < len(t); i++ { - if len(t[i]) != s { - return proof, ErrIncompatibleSize - } - } - - // commit to the tables in f and t - nbRows := len(t) - proof.fs = make([]kzg.Digest, nbRows) - proof.ts = make([]kzg.Digest, nbRows) - _nbColumns := len(f[0]) + 1 - if _nbColumns < len(t[0]) { - _nbColumns = len(t[0]) - } - d := fft.NewDomain(uint64(_nbColumns)) - nbColumns := d.Cardinality - lfs := make([][]fr.Element, nbRows) - cfs := make([][]fr.Element, nbRows) - lts := make([][]fr.Element, nbRows) - cts := make([][]fr.Element, nbRows) - - for i := 0; i < nbRows; i++ { - - cfs[i] = make([]fr.Element, nbColumns) - lfs[i] = make([]fr.Element, nbColumns) - copy(cfs[i], f[i]) - copy(lfs[i], f[i]) - for j := len(f[i]); j < int(nbColumns); j++ { - cfs[i][j] = f[i][len(f[i])-1] - lfs[i][j] = f[i][len(f[i])-1] - } - d.FFTInverse(cfs[i], fft.DIF) - fft.BitReverse(cfs[i]) - proof.fs[i], err = kzg.Commit(cfs[i], srs) - if err != nil { - return proof, err - } - - cts[i] = make([]fr.Element, nbColumns) - lts[i] = make([]fr.Element, nbColumns) - copy(cts[i], t[i]) - copy(lts[i], t[i]) - for j := len(t[i]); j < int(d.Cardinality); j++ { - cts[i][j] = t[i][len(t[i])-1] - lts[i][j] = t[i][len(t[i])-1] - } - d.FFTInverse(cts[i], fft.DIF) - fft.BitReverse(cts[i]) - proof.ts[i], err = kzg.Commit(cts[i], srs) - if err != nil { - return proof, err - } - } - - // fold f and t - comms := make([]*kzg.Digest, 2*nbRows) - for i := 0; i < nbRows; i++ { - comms[i] = 
new(kzg.Digest)
- comms[i].Set(&proof.fs[i])
- comms[nbRows+i] = new(kzg.Digest)
- comms[nbRows+i].Set(&proof.ts[i])
- }
- lambda, err := deriveRandomness(&fs, "lambda", comms...)
- if err != nil {
- return proof, err
- }
- foldedf := make(Table, nbColumns)
- foldedt := make(Table, nbColumns)
- for i := 0; i < int(nbColumns); i++ {
- for j := nbRows - 1; j >= 0; j-- {
- foldedf[i].Mul(&foldedf[i], &lambda).
- Add(&foldedf[i], &lfs[j][i])
- foldedt[i].Mul(&foldedt[i], &lambda).
- Add(&foldedt[i], &lts[j][i])
- }
- }
-
- // generate a proof of permutation of the foldedt and sort(foldedt)
- foldedtSorted := make(Table, nbColumns)
- copy(foldedtSorted, foldedt)
- sort.Sort(foldedtSorted)
- proof.permutationProof, err = permutation.Prove(srs, foldedt, foldedtSorted)
- if err != nil {
- return proof, err
- }
-
- // call plookupVector, on foldedf[:len(foldedf)-1] to ensure that the domain size
- // in ProveLookupVector is the same as d's
- proof.foldedProof, err = ProveLookupVector(srs, foldedf[:len(foldedf)-1], foldedt)
-
- return proof, err
-}
-
-// VerifyLookupTables verifies that a ProofLookupTables proof is correct.
-func VerifyLookupTables(srs *kzg.SRS, proof ProofLookupTables) error {
-
- // hash function used for Fiat Shamir
- hFunc := sha256.New()
-
- // transcript to derive the challenge
- fs := fiatshamir.NewTranscript(hFunc, "lambda")
-
- // check that the number of digests is the same
- if len(proof.fs) != len(proof.ts) {
- return ErrNumberDigests
- }
-
- // fold the commitments fs and ts
- nbRows := len(proof.fs)
- comms := make([]*kzg.Digest, 2*nbRows)
- for i := 0; i < nbRows; i++ {
- comms[i] = &proof.fs[i]
- comms[i+nbRows] = &proof.ts[i]
- }
- lambda, err := deriveRandomness(&fs, "lambda", comms...)
- if err != nil {
- return err
- }
-
- // fold the commitments of the rows of t and f
- var comf, comt kzg.Digest
- comf.Set(&proof.fs[nbRows-1])
- comt.Set(&proof.ts[nbRows-1])
- var blambda big.Int
- lambda.ToBigIntRegular(&blambda)
- for i := nbRows - 2; i >= 0; i-- {
- comf.ScalarMultiplication(&comf, &blambda).
- Add(&comf, &proof.fs[i])
- comt.ScalarMultiplication(&comt, &blambda).
- Add(&comt, &proof.ts[i])
- }
-
- // check that the folded commitment of the fs correspond to foldedProof.f
- if !comf.Equal(&proof.foldedProof.f) {
- return ErrFoldedCommitment
- }
-
- // check that the folded commitment of the ts is a permutation of proof.FoldedProof.t
- err = permutation.Verify(srs, proof.permutationProof)
- if err != nil {
- return err
- }
-
- // verify the inner proof
- return VerifyLookupVector(srs, proof.foldedProof)
-}
-
-// TODO put that in fiat-shamir package
-func deriveRandomness(fs *fiatshamir.Transcript, challenge string, points ...*bn254.G1Affine) (fr.Element, error) {
-
- var buf [bn254.SizeOfG1AffineUncompressed]byte
- var r fr.Element
-
- for _, p := range points {
- buf = p.RawBytes()
- if err := fs.Bind(challenge, buf[:]); err != nil {
- return r, err
- }
- }
-
- b, err := fs.ComputeChallenge(challenge)
- if err != nil {
- return r, err
- }
- r.SetBytes(b)
- return r, nil
-}
diff --git a/tools/gnark/bn254/fr/plookup/vector.go b/tools/gnark/bn254/fr/plookup/vector.go
deleted file mode 100644
index 5ebbce5c..00000000
--- a/tools/gnark/bn254/fr/plookup/vector.go
+++ /dev/null
@@ -1,735 +0,0 @@
-// Copyright 2020 ConsenSys Software Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Code generated by consensys/gnark-crypto DO NOT EDIT
-
-package plookup
-
-import (
- "crypto/sha256"
- "errors"
- "math/big"
- "math/bits"
- "sort"
-
- "github.com/consensys/gnark-crypto/ecc/bn254/fr"
- "github.com/consensys/gnark-crypto/ecc/bn254/fr/fft"
- "github.com/consensys/gnark-crypto/ecc/bn254/fr/kzg"
- fiatshamir "github.com/consensys/gnark-crypto/fiat-shamir"
-)
-
-var (
- ErrNotInTable = errors.New("some value in the vector is not in the lookup table")
- ErrPlookupVerification = errors.New("plookup verification failed")
- ErrGenerator = errors.New("wrong generator")
-)
-
-type Table []fr.Element
-
-// Len is the number of elements in the collection.
-func (t Table) Len() int {
- return len(t)
-}
-
-// Less reports whether the element with
-// index i should sort before the element with index j.
-func (t Table) Less(i, j int) bool {
- return t[i].Cmp(&t[j]) == -1
-}
-
-// Swap swaps the elements with indexes i and j.
-func (t Table) Swap(i, j int) {
- t[i], t[j] = t[j], t[i]
-}
-
-// Proof Plookup proof, containing opening proofs
-type ProofLookupVector struct {
-
- // size of the system
- size uint64
-
- // generator of the fft domain, used for shifting the evaluation point
- g fr.Element
-
- // Commitments to h1, h2, t, z, f, h
- h1, h2, t, z, f, h kzg.Digest
-
- // Batch opening proof of h1, h2, z, t
- BatchedProof kzg.BatchOpeningProof
-
- // Batch opening proof of h1, h2, z shifted by g
- BatchedProofShifted kzg.BatchOpeningProof
-}
-
-// evaluateAccumulationPolynomial computes Z, in Lagrange basis. Z is the accumulation of the partial
-// ratios of 2 fully split polynomials (cf https://eprint.iacr.org/2020/315.pdf)
-// * lf is the list of values that should be in lt
-// * lt is the lookup table
-// * lh1, lh2 is lf sorted by lt split in 2 overlapping slices
-// * beta, gamma are challenges (Schwartz-zippel: they are the random evaluations point)
-func evaluateAccumulationPolynomial(lf, lt, lh1, lh2 []fr.Element, beta, gamma fr.Element) []fr.Element {
-
- z := make([]fr.Element, len(lt))
-
- n := len(lt)
- d := make([]fr.Element, n-1)
- var u, c fr.Element
- c.SetOne().
- Add(&c, &beta).
- Mul(&c, &gamma)
- for i := 0; i < n-1; i++ {
-
- d[i].Mul(&beta, &lh1[i+1]).
- Add(&d[i], &lh1[i]).
- Add(&d[i], &c)
-
- u.Mul(&beta, &lh2[i+1]).
- Add(&u, &lh2[i]).
- Add(&u, &c)
-
- d[i].Mul(&d[i], &u)
- }
- d = fr.BatchInvert(d)
-
- z[0].SetOne()
- var a, b, e fr.Element
- e.SetOne().Add(&e, &beta)
- for i := 0; i < n-1; i++ {
-
- a.Add(&gamma, &lf[i])
-
- b.Mul(&beta, &lt[i+1]).
- Add(&b, &lt[i]).
- Add(&b, &c)
-
- a.Mul(&a, &b).
- Mul(&a, &e)
-
- z[i+1].Mul(&z[i], &a).
- Mul(&z[i+1], &d[i]) - } - - return z -} - -// evaluateNumBitReversed computes the evaluation (shifted, bit reversed) of h where -// h = (x-1)*z*(1+\beta)*(\gamma+f)*(\gamma(1+\beta) + t+ \beta*t(gX)) - -// -// (x-1)*z(gX)*(\gamma(1+\beta) + h_{1} + \beta*h_{1}(gX))*(\gamma(1+\beta) + h_{2} + \beta*h_{2}(gX) ) -// -// * cz, ch1, ch2, ct, cf are the polynomials z, h1, h2, t, f in canonical basis -// * _lz, _lh1, _lh2, _lt, _lf are the polynomials z, h1, h2, t, f in shifted Lagrange basis (domainBig) -// * beta, gamma are the challenges -// * it returns h in canonical basis -func evaluateNumBitReversed(_lz, _lh1, _lh2, _lt, _lf []fr.Element, beta, gamma fr.Element, domainBig *fft.Domain) []fr.Element { - - // result - s := int(domainBig.Cardinality) - num := make([]fr.Element, domainBig.Cardinality) - - var u, onePlusBeta, GammaTimesOnePlusBeta, m, n, one fr.Element - - one.SetOne() - onePlusBeta.Add(&one, &beta) - GammaTimesOnePlusBeta.Mul(&onePlusBeta, &gamma) - - g := make([]fr.Element, s) - g[0].Set(&domainBig.FrMultiplicativeGen) - for i := 1; i < s; i++ { - g[i].Mul(&g[i-1], &domainBig.Generator) - } - - var gg fr.Element - expo := big.NewInt(int64(domainBig.Cardinality>>1 - 1)) - gg.Square(&domainBig.Generator).Exp(gg, expo) - - nn := uint64(64 - bits.TrailingZeros64(domainBig.Cardinality)) - - for i := 0; i < s; i++ { - - _i := int(bits.Reverse64(uint64(i)) >> nn) - _is := int(bits.Reverse64(uint64((i+2)%s)) >> nn) - - // m = z*(1+\beta)*(\gamma+f)*(\gamma(1+\beta) + t+ \beta*t(gX)) - m.Mul(&onePlusBeta, &_lz[_i]) - u.Add(&gamma, &_lf[_i]) - m.Mul(&m, &u) - u.Mul(&beta, &_lt[_is]). - Add(&u, &_lt[_i]). - Add(&u, &GammaTimesOnePlusBeta) - m.Mul(&m, &u) - - // n = z(gX)*(\gamma(1+\beta) + h_{1} + \beta*h_{1}(gX))*(\gamma(1+\beta) + h_{2} + \beta*h_{2}(gX) - n.Mul(&beta, &_lh1[_is]). - Add(&n, &_lh1[_i]). - Add(&n, &GammaTimesOnePlusBeta) - u.Mul(&beta, &_lh2[_is]). - Add(&u, &_lh2[_i]). - Add(&u, &GammaTimesOnePlusBeta) - n.Mul(&n, &u). 
- Mul(&n, &_lz[_is]) - - // (x-gg**(n-1))*(m-n) - num[_i].Sub(&m, &n) - u.Sub(&g[i], &gg) - num[_i].Mul(&num[_i], &u) - - } - - return num -} - -// evaluateXnMinusOneDomainBig returns the evaluation of (x^{n}-1) on FrMultiplicativeGen*< g > -func evaluateXnMinusOneDomainBig(domainBig *fft.Domain) [2]fr.Element { - - sizeDomainSmall := domainBig.Cardinality / 2 - - var one fr.Element - one.SetOne() - - // x^{n}-1 on FrMultiplicativeGen*< g > - var res [2]fr.Element - var shift fr.Element - shift.Exp(domainBig.FrMultiplicativeGen, big.NewInt(int64(sizeDomainSmall))) - res[0].Sub(&shift, &one) - res[1].Add(&shift, &one).Neg(&res[1]) - - return res - -} - -// evaluateL0DomainBig returns the evaluation of (x^{n}-1)/(x-1) on -// x^{n}-1 on FrMultiplicativeGen*< g > -func evaluateL0DomainBig(domainBig *fft.Domain) ([2]fr.Element, []fr.Element) { - - var one fr.Element - one.SetOne() - - // x^{n}-1 on FrMultiplicativeGen*< g > - xnMinusOne := evaluateXnMinusOneDomainBig(domainBig) - - // 1/(x-1) on FrMultiplicativeGen*< g > - var acc fr.Element - denL0 := make([]fr.Element, domainBig.Cardinality) - acc.Set(&domainBig.FrMultiplicativeGen) - for i := 0; i < int(domainBig.Cardinality); i++ { - denL0[i].Sub(&acc, &one) - acc.Mul(&acc, &domainBig.Generator) - } - denL0 = fr.BatchInvert(denL0) - - return xnMinusOne, denL0 -} - -// evaluationLnDomainBig returns the evaluation of (x^{n}-1)/(x-g^{n-1}) on -// x^{n}-1 on FrMultiplicativeGen*< g > -func evaluationLnDomainBig(domainBig *fft.Domain) ([2]fr.Element, []fr.Element) { - - sizeDomainSmall := domainBig.Cardinality / 2 - - var one fr.Element - one.SetOne() - - // x^{n}-1 on FrMultiplicativeGen*< g > - numLn := evaluateXnMinusOneDomainBig(domainBig) - - // 1/(x-g^{n-1}) on FrMultiplicativeGen*< g > - var gg, acc fr.Element - gg.Square(&domainBig.Generator).Exp(gg, big.NewInt(int64(sizeDomainSmall-1))) - denLn := make([]fr.Element, domainBig.Cardinality) - acc.Set(&domainBig.FrMultiplicativeGen) - for i := 0; i < int(domainBig.Cardinality); i++ { - denLn[i].Sub(&acc, &gg) - acc.Mul(&acc, &domainBig.Generator) - } - denLn = fr.BatchInvert(denLn) - - return numLn, denLn - -} - -// evaluateZStartsByOneBitReversed returns l0 * (z-1), in Lagrange basis and bit reversed order -func evaluateZStartsByOneBitReversed(lsZBitReversed []fr.Element, domainBig *fft.Domain) []fr.Element { - - var one fr.Element - one.SetOne() - - res := make([]fr.Element, domainBig.Cardinality) - - nn := uint64(64 - bits.TrailingZeros64(domainBig.Cardinality)) - - xnMinusOne, denL0 := evaluateL0DomainBig(domainBig) - - for i := 0; i < len(lsZBitReversed); i++ { - _i := int(bits.Reverse64(uint64(i)) >> nn) - res[_i].Sub(&lsZBitReversed[_i], &one). - Mul(&res[_i], &xnMinusOne[i%2]). - Mul(&res[_i], &denL0[i]) - } - - return res -} - -// evaluateZEndsByOneBitReversed returns ln * (z-1), in Lagrange basis and bit reversed order -func evaluateZEndsByOneBitReversed(lsZBitReversed []fr.Element, domainBig *fft.Domain) []fr.Element { - - var one fr.Element - one.SetOne() - - numLn, denLn := evaluationLnDomainBig(domainBig) - - res := make([]fr.Element, len(lsZBitReversed)) - nn := uint64(64 - bits.TrailingZeros64(domainBig.Cardinality)) - - for i := 0; i < len(lsZBitReversed); i++ { - _i := int(bits.Reverse64(uint64(i)) >> nn) - res[_i].Sub(&lsZBitReversed[_i], &one). - Mul(&res[_i], &numLn[i%2]). 
- Mul(&res[_i], &denLn[i]) - } - - return res -} - -// evaluateOverlapH1h2BitReversed returns ln * (h1 - h2(g.x)), in Lagrange basis and bit reversed order -func evaluateOverlapH1h2BitReversed(_lh1, _lh2 []fr.Element, domainBig *fft.Domain) []fr.Element { - - var one fr.Element - one.SetOne() - - numLn, denLn := evaluationLnDomainBig(domainBig) - - res := make([]fr.Element, len(_lh1)) - nn := uint64(64 - bits.TrailingZeros64(domainBig.Cardinality)) - - s := len(_lh1) - for i := 0; i < s; i++ { - - _i := int(bits.Reverse64(uint64(i)) >> nn) - _is := int(bits.Reverse64(uint64((i+2)%s)) >> nn) - - res[_i].Sub(&_lh1[_i], &_lh2[_is]). - Mul(&res[_i], &numLn[i%2]). - Mul(&res[_i], &denLn[i]) - } - - return res -} - -// computeQuotientCanonical computes the full quotient of the plookup protocol. -// * alpha is the challenge to fold the numerator -// * lh, lh0, lhn, lh1h2 are the various pieces of the numerator (Lagrange shifted form, bit reversed order) -// * domainBig fft domain -// It returns the quotient, in canonical basis -func computeQuotientCanonical(alpha fr.Element, lh, lh0, lhn, lh1h2 []fr.Element, domainBig *fft.Domain) []fr.Element { - - sizeDomainBig := int(domainBig.Cardinality) - res := make([]fr.Element, sizeDomainBig) - - var one fr.Element - one.SetOne() - - numLn := evaluateXnMinusOneDomainBig(domainBig) - numLn[0].Inverse(&numLn[0]) - numLn[1].Inverse(&numLn[1]) - nn := uint64(64 - bits.TrailingZeros64(domainBig.Cardinality)) - - for i := 0; i < sizeDomainBig; i++ { - - _i := int(bits.Reverse64(uint64(i)) >> nn) - - res[_i].Mul(&lh1h2[_i], &alpha). - Add(&res[_i], &lhn[_i]). - Mul(&res[_i], &alpha). - Add(&res[_i], &lh0[_i]). - Mul(&res[_i], &alpha). - Add(&res[_i], &lh[_i]). - Mul(&res[_i], &numLn[i%2]) - } - - domainBig.FFTInverse(res, fft.DIT, true) - - return res -} - -// ProveLookupVector returns proof that the values in f are in t. -// -// /!\IMPORTANT/!\ -// -// If the table t is already commited somewhere (which is the normal workflow -// before generating a lookup proof), the commitment needs to be done on the -// table sorted. Otherwise the commitment in proof.t will not be the same as -// the public commitment: it will contain the same values, but permuted. 
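To make the warning above concrete, here is a hypothetical helper (not part of the package) showing how a caller could derive the public table commitment so that it matches proof.t; it mirrors the padding, sorting and commitment steps performed inside ProveLookupVector below, and assumes domainSmall is the same domain the prover builds internally (of size max(len(f)+1, len(t)), rounded up to a power of two).

```go
// commitLookupTable commits to the lookup table t the way ProveLookupVector does,
// so the returned digest equals proof.t for the same srs and inputs.
func commitLookupTable(srs *kzg.SRS, t Table, domainSmall *fft.Domain) (kzg.Digest, error) {
	lt := make(Table, domainSmall.Cardinality)
	copy(lt, t)
	for i := len(t); i < len(lt); i++ {
		lt[i] = t[len(t)-1] // pad with the last entry, as ProveLookupVector does
	}
	sort.Sort(lt) // commit to the *sorted* table
	ct := make([]fr.Element, len(lt))
	copy(ct, lt)
	domainSmall.FFTInverse(ct, fft.DIF)
	fft.BitReverse(ct)
	return kzg.Commit(ct, srs)
}
```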
-func ProveLookupVector(srs *kzg.SRS, f, t Table) (ProofLookupVector, error) { - - // res - var proof ProofLookupVector - var err error - - // hash function used for Fiat Shamir - hFunc := sha256.New() - - // transcript to derive the challenge - fs := fiatshamir.NewTranscript(hFunc, "beta", "gamma", "alpha", "nu") - - // create domains - var domainSmall *fft.Domain - if len(t) <= len(f) { - domainSmall = fft.NewDomain(uint64(len(f) + 1)) - } else { - domainSmall = fft.NewDomain(uint64(len(t))) - } - sizeDomainSmall := int(domainSmall.Cardinality) - - // set the size - proof.size = domainSmall.Cardinality - - // set the generator - proof.g.Set(&domainSmall.Generator) - - // resize f and t - // note: the last element of lf does not matter - lf := make([]fr.Element, sizeDomainSmall) - lt := make([]fr.Element, sizeDomainSmall) - cf := make([]fr.Element, sizeDomainSmall) - ct := make([]fr.Element, sizeDomainSmall) - copy(lt, t) - copy(lf, f) - for i := len(f); i < sizeDomainSmall; i++ { - lf[i] = f[len(f)-1] - } - for i := len(t); i < sizeDomainSmall; i++ { - lt[i] = t[len(t)-1] - } - sort.Sort(Table(lt)) - copy(ct, lt) - copy(cf, lf) - domainSmall.FFTInverse(ct, fft.DIF) - domainSmall.FFTInverse(cf, fft.DIF) - fft.BitReverse(ct) - fft.BitReverse(cf) - proof.t, err = kzg.Commit(ct, srs) - if err != nil { - return proof, err - } - proof.f, err = kzg.Commit(cf, srs) - if err != nil { - return proof, err - } - - // write f sorted by t - lfSortedByt := make(Table, 2*domainSmall.Cardinality-1) - copy(lfSortedByt, lt) - copy(lfSortedByt[domainSmall.Cardinality:], lf) - sort.Sort(lfSortedByt) - - // compute h1, h2, commit to them - lh1 := make([]fr.Element, sizeDomainSmall) - lh2 := make([]fr.Element, sizeDomainSmall) - ch1 := make([]fr.Element, sizeDomainSmall) - ch2 := make([]fr.Element, sizeDomainSmall) - copy(lh1, lfSortedByt[:sizeDomainSmall]) - copy(lh2, lfSortedByt[sizeDomainSmall-1:]) - - copy(ch1, lfSortedByt[:sizeDomainSmall]) - copy(ch2, lfSortedByt[sizeDomainSmall-1:]) - domainSmall.FFTInverse(ch1, fft.DIF) - domainSmall.FFTInverse(ch2, fft.DIF) - fft.BitReverse(ch1) - fft.BitReverse(ch2) - - proof.h1, err = kzg.Commit(ch1, srs) - if err != nil { - return proof, err - } - proof.h2, err = kzg.Commit(ch2, srs) - if err != nil { - return proof, err - } - - // derive beta, gamma - beta, err := deriveRandomness(&fs, "beta", &proof.t, &proof.f, &proof.h1, &proof.h2) - if err != nil { - return proof, err - } - gamma, err := deriveRandomness(&fs, "gamma") - if err != nil { - return proof, err - } - - // Compute to Z - lz := evaluateAccumulationPolynomial(lf, lt, lh1, lh2, beta, gamma) - cz := make([]fr.Element, len(lz)) - copy(cz, lz) - domainSmall.FFTInverse(cz, fft.DIF) - fft.BitReverse(cz) - proof.z, err = kzg.Commit(cz, srs) - if err != nil { - return proof, err - } - - // prepare data for computing the quotient - // compute the numerator - s := domainSmall.Cardinality - domainBig := fft.NewDomain(uint64(2 * s)) - - _lz := make([]fr.Element, 2*s) - _lh1 := make([]fr.Element, 2*s) - _lh2 := make([]fr.Element, 2*s) - _lt := make([]fr.Element, 2*s) - _lf := make([]fr.Element, 2*s) - copy(_lz, cz) - copy(_lh1, ch1) - copy(_lh2, ch2) - copy(_lt, ct) - copy(_lf, cf) - domainBig.FFT(_lz, fft.DIF, true) - domainBig.FFT(_lh1, fft.DIF, true) - domainBig.FFT(_lh2, fft.DIF, true) - domainBig.FFT(_lt, fft.DIF, true) - domainBig.FFT(_lf, fft.DIF, true) - - // compute h - lh := evaluateNumBitReversed(_lz, _lh1, _lh2, _lt, _lf, beta, gamma, domainBig) - - // compute l0*(z-1) - lh0 := 
evaluateZStartsByOneBitReversed(_lz, domainBig) - - // compute ln(z-1) - lhn := evaluateZEndsByOneBitReversed(_lz, domainBig) - - // compute ln*(h1-h2(g*X)) - lh1h2 := evaluateOverlapH1h2BitReversed(_lh1, _lh2, domainBig) - - // compute the quotient - alpha, err := deriveRandomness(&fs, "alpha", &proof.z) - if err != nil { - return proof, err - } - ch := computeQuotientCanonical(alpha, lh, lh0, lhn, lh1h2, domainBig) - proof.h, err = kzg.Commit(ch, srs) - if err != nil { - return proof, err - } - - // build the opening proofs - nu, err := deriveRandomness(&fs, "nu", &proof.h) - if err != nil { - return proof, err - } - proof.BatchedProof, err = kzg.BatchOpenSinglePoint( - [][]fr.Element{ - ch1, - ch2, - ct, - cz, - cf, - ch, - }, - []kzg.Digest{ - proof.h1, - proof.h2, - proof.t, - proof.z, - proof.f, - proof.h, - }, - nu, - hFunc, - srs, - ) - if err != nil { - return proof, err - } - - nu.Mul(&nu, &domainSmall.Generator) - proof.BatchedProofShifted, err = kzg.BatchOpenSinglePoint( - [][]fr.Element{ - ch1, - ch2, - ct, - cz, - }, - []kzg.Digest{ - proof.h1, - proof.h2, - proof.t, - proof.z, - }, - nu, - hFunc, - srs, - ) - if err != nil { - return proof, err - } - - return proof, nil -} - -// VerifyLookupVector verifies that a ProofLookupVector proof is correct -func VerifyLookupVector(srs *kzg.SRS, proof ProofLookupVector) error { - - // hash function that is used for Fiat Shamir - hFunc := sha256.New() - - // transcript to derive the challenge - fs := fiatshamir.NewTranscript(hFunc, "beta", "gamma", "alpha", "nu") - - // derive the various challenges - beta, err := deriveRandomness(&fs, "beta", &proof.t, &proof.f, &proof.h1, &proof.h2) - if err != nil { - return err - } - - gamma, err := deriveRandomness(&fs, "gamma") - if err != nil { - return err - } - - alpha, err := deriveRandomness(&fs, "alpha", &proof.z) - if err != nil { - return err - } - - nu, err := deriveRandomness(&fs, "nu", &proof.h) - if err != nil { - return err - } - - // check opening proofs - err = kzg.BatchVerifySinglePoint( - []kzg.Digest{ - proof.h1, - proof.h2, - proof.t, - proof.z, - proof.f, - proof.h, - }, - &proof.BatchedProof, - nu, - hFunc, - srs, - ) - if err != nil { - return err - } - - // shift the point and verify shifted proof - var shiftedNu fr.Element - shiftedNu.Mul(&nu, &proof.g) - err = kzg.BatchVerifySinglePoint( - []kzg.Digest{ - proof.h1, - proof.h2, - proof.t, - proof.z, - }, - &proof.BatchedProofShifted, - shiftedNu, - hFunc, - srs, - ) - if err != nil { - return err - } - - // check the generator is correct - var checkOrder, one fr.Element - one.SetOne() - checkOrder.Exp(proof.g, big.NewInt(int64(proof.size/2))) - if checkOrder.Equal(&one) { - return ErrGenerator - } - checkOrder.Square(&checkOrder) - if !checkOrder.Equal(&one) { - return ErrGenerator - } - - // check polynomial relation using Schwartz Zippel - var lhs, rhs, nun, g, _g, a, v, w fr.Element - g.Exp(proof.g, big.NewInt(int64(proof.size-1))) - - v.Add(&one, &beta) - w.Mul(&v, &gamma) - - // h(ν) where - // h = (xⁿ⁻¹-1)*z*(1+β)*(γ+f)*(γ(1+β) + t+ β*t(gX)) - - // (xⁿ⁻¹-1)*z(gX)*(γ(1+β) + h₁ + β*h₁(gX))*(γ(1+β) + h₂ + β*h₂(gX) ) - lhs.Sub(&nu, &g). // (ν-gⁿ⁻¹) - Mul(&lhs, &proof.BatchedProof.ClaimedValues[3]). - Mul(&lhs, &v) - a.Add(&gamma, &proof.BatchedProof.ClaimedValues[4]) - lhs.Mul(&lhs, &a) - a.Mul(&beta, &proof.BatchedProofShifted.ClaimedValues[2]). - Add(&a, &proof.BatchedProof.ClaimedValues[2]). - Add(&a, &w) - lhs.Mul(&lhs, &a) - - rhs.Sub(&nu, &g). 
- Mul(&rhs, &proof.BatchedProofShifted.ClaimedValues[3]) - a.Mul(&beta, &proof.BatchedProofShifted.ClaimedValues[0]). - Add(&a, &proof.BatchedProof.ClaimedValues[0]). - Add(&a, &w) - rhs.Mul(&rhs, &a) - a.Mul(&beta, &proof.BatchedProofShifted.ClaimedValues[1]). - Add(&a, &proof.BatchedProof.ClaimedValues[1]). - Add(&a, &w) - rhs.Mul(&rhs, &a) - - lhs.Sub(&lhs, &rhs) - - // check consistancy of bounds - var l0, ln, d1, d2 fr.Element - l0.Exp(nu, big.NewInt(int64(proof.size))).Sub(&l0, &one) - ln.Set(&l0) - d1.Sub(&nu, &one) - d2.Sub(&nu, &g) - l0.Div(&l0, &d1) // (νⁿ-1)/(ν-1) - ln.Div(&ln, &d2) // (νⁿ-1)/(ν-gⁿ⁻¹) - - // l₀*(z-1) = (νⁿ-1)/(ν-1)*(z-1) - var l0z fr.Element - l0z.Sub(&proof.BatchedProof.ClaimedValues[3], &one). - Mul(&l0z, &l0) - - // lₙ*(z-1) = (νⁿ-1)/(ν-gⁿ⁻¹)*(z-1) - var lnz fr.Element - lnz.Sub(&proof.BatchedProof.ClaimedValues[3], &one). - Mul(&ln, &lnz) - - // lₙ*(h1 - h₂(g.x)) - var lnh1h2 fr.Element - lnh1h2.Sub(&proof.BatchedProof.ClaimedValues[0], &proof.BatchedProofShifted.ClaimedValues[1]). - Mul(&lnh1h2, &ln) - - // fold the numerator - lnh1h2.Mul(&lnh1h2, &alpha). - Add(&lnh1h2, &lnz). - Mul(&lnh1h2, &alpha). - Add(&lnh1h2, &l0z). - Mul(&lnh1h2, &alpha). - Add(&lnh1h2, &lhs) - - // (xⁿ-1) * h(x) evaluated at ν - nun.Exp(nu, big.NewInt(int64(proof.size))) - _g.Sub(&nun, &one) - _g.Mul(&proof.BatchedProof.ClaimedValues[5], &_g) - if !lnh1h2.Equal(&_g) { - return ErrPlookupVerification - } - - return nil -} diff --git a/tools/gnark/bn254/fr/polynomial/doc.go b/tools/gnark/bn254/fr/polynomial/doc.go deleted file mode 100644 index 83479b05..00000000 --- a/tools/gnark/bn254/fr/polynomial/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2020 ConsenSys Software Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by consensys/gnark-crypto DO NOT EDIT - -// Package polynomial provides polynomial methods and commitment schemes. -package polynomial diff --git a/tools/gnark/bn254/fr/polynomial/multilin.go b/tools/gnark/bn254/fr/polynomial/multilin.go deleted file mode 100644 index 0276fca1..00000000 --- a/tools/gnark/bn254/fr/polynomial/multilin.go +++ /dev/null @@ -1,271 +0,0 @@ -// Copyright 2020 ConsenSys Software Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by consensys/gnark-crypto DO NOT EDIT - -package polynomial - -import ( - "github.com/consensys/gnark-crypto/ecc/bn254/fr" - "math/bits" -) - -// MultiLin tracks the values of a (dense i.e. 
not sparse) multilinear polynomial -// The variables are X₁ through Xₙ where n = log(len(.)) -// .[∑ᵢ 2ⁱ⁻¹ bₙ₋ᵢ] = the polynomial evaluated at (b₁, b₂, ..., bₙ) -// It is understood that any hypercube evaluation can be extrapolated to a multilinear polynomial -type MultiLin []fr.Element - -// Fold is partial evaluation function k[X₁, X₂, ..., Xₙ] → k[X₂, ..., Xₙ] by setting X₁=r -func (m *MultiLin) Fold(r fr.Element) { - mid := len(*m) / 2 - - bottom, top := (*m)[:mid], (*m)[mid:] - - // updating bookkeeping table - // knowing that the polynomial f ∈ (k[X₂, ..., Xₙ])[X₁] is linear, we would get f(r) = f(0) + r(f(1) - f(0)) - // the following loop computes the evaluations of f(r) accordingly: - // f(r, b₂, ..., bₙ) = f(0, b₂, ..., bₙ) + r(f(1, b₂, ..., bₙ) - f(0, b₂, ..., bₙ)) - for i := 0; i < mid; i++ { - // table[i] ← table[i] + r (table[i + mid] - table[i]) - top[i].Sub(&top[i], &bottom[i]) - top[i].Mul(&top[i], &r) - bottom[i].Add(&bottom[i], &top[i]) - } - - *m = (*m)[:mid] -} - -func (m MultiLin) Sum() fr.Element { - s := m[0] - for i := 1; i < len(m); i++ { - s.Add(&s, &m[i]) - } - return s -} - -func _clone(m MultiLin, p *Pool) MultiLin { - if p == nil { - return m.Clone() - } else { - return p.Clone(m) - } -} - -func _dump(m MultiLin, p *Pool) { - if p != nil { - p.Dump(m) - } -} - -// Evaluate extrapolate the value of the multilinear polynomial corresponding to m -// on the given coordinates -func (m MultiLin) Evaluate(coordinates []fr.Element, p *Pool) fr.Element { - // Folding is a mutating operation - bkCopy := _clone(m, p) - - // Evaluate step by step through repeated folding (i.e. evaluation at the first remaining variable) - for _, r := range coordinates { - bkCopy.Fold(r) - } - - result := bkCopy[0] - - _dump(bkCopy, p) - return result -} - -// Clone creates a deep copy of a bookkeeping table. -// Both multilinear interpolation and sumcheck require folding an underlying -// array, but folding changes the array. To do both one requires a deep copy -// of the bookkeeping table. -func (m MultiLin) Clone() MultiLin { - res := make(MultiLin, len(m)) - copy(res, m) - return res -} - -// Add two bookKeepingTables -func (m *MultiLin) Add(left, right MultiLin) { - size := len(left) - // Check that left and right have the same size - if len(right) != size || len(*m) != size { - panic("left, right and destination must have the right size") - } - - // Add elementwise - for i := 0; i < size; i++ { - (*m)[i].Add(&left[i], &right[i]) - } -} - -// EvalEq computes Eq(q₁, ... , qₙ, h₁, ... , hₙ) = Π₁ⁿ Eq(qᵢ, hᵢ) -// where Eq(x,y) = xy + (1-x)(1-y) = 1 - x - y + xy + xy interpolates -// -// _________________ -// | | | -// | 0 | 1 | -// |_______|_______| -// y | | | -// | 1 | 0 | -// |_______|_______| -// -// x -// -// In other words the polynomial evaluated here is the multilinear extrapolation of -// one that evaluates to q' == h' for vectors q', h' of binary values -func EvalEq(q, h []fr.Element) fr.Element { - var res, nxt, one, sum fr.Element - one.SetOne() - for i := 0; i < len(q); i++ { - nxt.Mul(&q[i], &h[i]) // nxt <- qᵢ * hᵢ - nxt.Double(&nxt) // nxt <- 2 * qᵢ * hᵢ - nxt.Add(&nxt, &one) // nxt <- 1 + 2 * qᵢ * hᵢ - sum.Add(&q[i], &h[i]) // sum <- qᵢ + hᵢ TODO: Why not subtract one by one from nxt? More parallel? 
- - if i == 0 { - res.Sub(&nxt, &sum) // nxt <- 1 + 2 * qᵢ * hᵢ - qᵢ - hᵢ - } else { - nxt.Sub(&nxt, &sum) // nxt <- 1 + 2 * qᵢ * hᵢ - qᵢ - hᵢ - res.Mul(&res, &nxt) // res <- res * nxt - } - } - return res -} - -// Eq sets m to the representation of the polynomial Eq(q₁, ..., qₙ, *, ..., *) × m[0] -func (m *MultiLin) Eq(q []fr.Element) { - n := len(q) - - if len(*m) != 1<= 0; i-- { - res.Mul(&res, v) - res.Add(&res, &(*p)[i]) - } - - return res -} - -// Clone returns a copy of the polynomial -func (p *Polynomial) Clone() Polynomial { - _p := make(Polynomial, len(*p)) - copy(_p, *p) - return _p -} - -// Set to another polynomial -func (p *Polynomial) Set(p1 Polynomial) { - if len(*p) != len(p1) { - *p = p1.Clone() - return - } - - for i := 0; i < len(p1); i++ { - (*p)[i].Set(&p1[i]) - } -} - -// AddConstantInPlace adds a constant to the polynomial, modifying p -func (p *Polynomial) AddConstantInPlace(c *fr.Element) { - for i := 0; i < len(*p); i++ { - (*p)[i].Add(&(*p)[i], c) - } -} - -// SubConstantInPlace subs a constant to the polynomial, modifying p -func (p *Polynomial) SubConstantInPlace(c *fr.Element) { - for i := 0; i < len(*p); i++ { - (*p)[i].Sub(&(*p)[i], c) - } -} - -// ScaleInPlace multiplies p by v, modifying p -func (p *Polynomial) ScaleInPlace(c *fr.Element) { - for i := 0; i < len(*p); i++ { - (*p)[i].Mul(&(*p)[i], c) - } -} - -// Scale multiplies p0 by v, storing the result in p -func (p *Polynomial) Scale(c *fr.Element, p0 Polynomial) { - if len(*p) != len(p0) { - *p = make(Polynomial, len(p0)) - } - for i := 0; i < len(p0); i++ { - (*p)[i].Mul(c, &p0[i]) - } -} - -// Add adds p1 to p2 -// This function allocates a new slice unless p == p1 or p == p2 -func (p *Polynomial) Add(p1, p2 Polynomial) *Polynomial { - - bigger := p1 - smaller := p2 - if len(bigger) < len(smaller) { - bigger, smaller = smaller, bigger - } - - if len(*p) == len(bigger) && (&(*p)[0] == &bigger[0]) { - for i := 0; i < len(smaller); i++ { - (*p)[i].Add(&(*p)[i], &smaller[i]) - } - return p - } - - if len(*p) == len(smaller) && (&(*p)[0] == &smaller[0]) { - for i := 0; i < len(smaller); i++ { - (*p)[i].Add(&(*p)[i], &bigger[i]) - } - *p = append(*p, bigger[len(smaller):]...) 
- return p - } - - res := make(Polynomial, len(bigger)) - copy(res, bigger) - for i := 0; i < len(smaller); i++ { - res[i].Add(&res[i], &smaller[i]) - } - *p = res - return p -} - -// Sub subtracts p2 from p1 -// TODO make interface more consistent with Add -func (p *Polynomial) Sub(p1, p2 Polynomial) *Polynomial { - if len(p1) != len(p2) || len(p2) != len(*p) { - return nil - } - for i := 0; i < len(*p); i++ { - (*p)[i].Sub(&p1[i], &p2[i]) - } - return p -} - -// Equal checks equality between two polynomials -func (p *Polynomial) Equal(p1 Polynomial) bool { - if (*p == nil) != (p1 == nil) { - return false - } - - if len(*p) != len(p1) { - return false - } - - for i := range p1 { - if !(*p)[i].Equal(&p1[i]) { - return false - } - } - - return true -} - -func (p Polynomial) SetZero() { - for i := 0; i < len(p); i++ { - p[i].SetZero() - } -} - -func (p Polynomial) Text(base int) string { - - var builder strings.Builder - - first := true - for d := len(p) - 1; d >= 0; d-- { - if p[d].IsZero() { - continue - } - - pD := p[d] - pDText := pD.Text(base) - - initialLen := builder.Len() - - if pDText[0] == '-' { - pDText = pDText[1:] - if first { - builder.WriteString("-") - } else { - builder.WriteString(" - ") - } - } else if !first { - builder.WriteString(" + ") - } - - first = false - - if !pD.IsOne() || d == 0 { - builder.WriteString(pDText) - } - - if builder.Len()-initialLen > 10 { - builder.WriteString("×") - } - - if d != 0 { - builder.WriteString("X") - } - if d > 1 { - builder.WriteString( - utils.ToSuperscript(strconv.Itoa(d)), - ) - } - - } - - if first { - return "0" - } - - return builder.String() -} diff --git a/tools/gnark/bn254/fr/polynomial/polynomial_test.go b/tools/gnark/bn254/fr/polynomial/polynomial_test.go deleted file mode 100644 index a97c4df6..00000000 --- a/tools/gnark/bn254/fr/polynomial/polynomial_test.go +++ /dev/null @@ -1,218 +0,0 @@ -// Copyright 2020 ConsenSys Software Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by consensys/gnark-crypto DO NOT EDIT - -package polynomial - -import ( - "github.com/consensys/gnark-crypto/ecc/bn254/fr" - "github.com/stretchr/testify/assert" - "math/big" - "testing" -) - -func TestPolynomialEval(t *testing.T) { - - // build polynomial - f := make(Polynomial, 20) - for i := 0; i < 20; i++ { - f[i].SetOne() - } - - // random value - var point fr.Element - point.SetRandom() - - // compute manually f(val) - var expectedEval, one, den fr.Element - var expo big.Int - one.SetOne() - expo.SetUint64(20) - expectedEval.Exp(point, &expo). 
- Sub(&expectedEval, &one) - den.Sub(&point, &one) - expectedEval.Div(&expectedEval, &den) - - // compute purported evaluation - purportedEval := f.Eval(&point) - - // check - if !purportedEval.Equal(&expectedEval) { - t.Fatal("polynomial evaluation failed") - } -} - -func TestPolynomialAddConstantInPlace(t *testing.T) { - - // build polynomial - f := make(Polynomial, 20) - for i := 0; i < 20; i++ { - f[i].SetOne() - } - - // constant to add - var c fr.Element - c.SetRandom() - - // add constant - f.AddConstantInPlace(&c) - - // check - var expectedCoeffs, one fr.Element - one.SetOne() - expectedCoeffs.Add(&one, &c) - for i := 0; i < 20; i++ { - if !f[i].Equal(&expectedCoeffs) { - t.Fatal("AddConstantInPlace failed") - } - } -} - -func TestPolynomialSubConstantInPlace(t *testing.T) { - - // build polynomial - f := make(Polynomial, 20) - for i := 0; i < 20; i++ { - f[i].SetOne() - } - - // constant to sub - var c fr.Element - c.SetRandom() - - // sub constant - f.SubConstantInPlace(&c) - - // check - var expectedCoeffs, one fr.Element - one.SetOne() - expectedCoeffs.Sub(&one, &c) - for i := 0; i < 20; i++ { - if !f[i].Equal(&expectedCoeffs) { - t.Fatal("SubConstantInPlace failed") - } - } -} - -func TestPolynomialScaleInPlace(t *testing.T) { - - // build polynomial - f := make(Polynomial, 20) - for i := 0; i < 20; i++ { - f[i].SetOne() - } - - // constant to scale by - var c fr.Element - c.SetRandom() - - // scale by constant - f.ScaleInPlace(&c) - - // check - for i := 0; i < 20; i++ { - if !f[i].Equal(&c) { - t.Fatal("ScaleInPlace failed") - } - } - -} - -func TestPolynomialAdd(t *testing.T) { - - // build unbalanced polynomials - f1 := make(Polynomial, 20) - f1Backup := make(Polynomial, 20) - for i := 0; i < 20; i++ { - f1[i].SetOne() - f1Backup[i].SetOne() - } - f2 := make(Polynomial, 10) - f2Backup := make(Polynomial, 10) - for i := 0; i < 10; i++ { - f2[i].SetOne() - f2Backup[i].SetOne() - } - - // expected result - var one, two fr.Element - one.SetOne() - two.Double(&one) - expectedSum := make(Polynomial, 20) - for i := 0; i < 10; i++ { - expectedSum[i].Set(&two) - } - for i := 10; i < 20; i++ { - expectedSum[i].Set(&one) - } - - // caller is empty - var g Polynomial - g.Add(f1, f2) - if !g.Equal(expectedSum) { - t.Fatal("add polynomials fails") - } - if !f1.Equal(f1Backup) { - t.Fatal("side effect, f1 should not have been modified") - } - if !f2.Equal(f2Backup) { - t.Fatal("side effect, f2 should not have been modified") - } - - // all operands are distincts - _f1 := f1.Clone() - _f1.Add(f1, f2) - if !_f1.Equal(expectedSum) { - t.Fatal("add polynomials fails") - } - if !f1.Equal(f1Backup) { - t.Fatal("side effect, f1 should not have been modified") - } - if !f2.Equal(f2Backup) { - t.Fatal("side effect, f2 should not have been modified") - } - - // first operand = caller - _f1 = f1.Clone() - _f2 := f2.Clone() - _f1.Add(_f1, _f2) - if !_f1.Equal(expectedSum) { - t.Fatal("add polynomials fails") - } - if !_f2.Equal(f2Backup) { - t.Fatal("side effect, _f2 should not have been modified") - } - - // second operand = caller - _f1 = f1.Clone() - _f2 = f2.Clone() - _f1.Add(_f2, _f1) - if !_f1.Equal(expectedSum) { - t.Fatal("add polynomials fails") - } - if !_f2.Equal(f2Backup) { - t.Fatal("side effect, _f2 should not have been modified") - } -} - -func TestPolynomialText(t *testing.T) { - var one, negTwo fr.Element - one.SetOne() - negTwo.SetInt64(-2) - - p := Polynomial{one, negTwo, one} - - assert.Equal(t, "X² - 2X + 1", p.Text(10)) -} diff --git a/tools/gnark/bn254/fr/polynomial/pool.go 
b/tools/gnark/bn254/fr/polynomial/pool.go deleted file mode 100644 index 29ca322f..00000000 --- a/tools/gnark/bn254/fr/polynomial/pool.go +++ /dev/null @@ -1,203 +0,0 @@ -// Copyright 2020 ConsenSys Software Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by consensys/gnark-crypto DO NOT EDIT - -package polynomial - -import ( - "encoding/json" - "fmt" - "github.com/consensys/gnark-crypto/ecc/bn254/fr" - "reflect" - "runtime" - "sort" - "sync" - "unsafe" -) - -// Memory management for polynomials -// WARNING: This is not thread safe TODO: Make sure that is not a problem -// TODO: There is a lot of "unsafe" memory management here and needs to be vetted thoroughly - -type sizedPool struct { - maxN int - pool sync.Pool - stats poolStats -} - -type inUseData struct { - allocatedFor []uintptr - pool *sizedPool -} - -type Pool struct { - //lock sync.Mutex - inUse map[*fr.Element]inUseData - subPools []sizedPool -} - -func (p *sizedPool) get(n int) *fr.Element { - p.stats.maake(n) - return p.pool.Get().(*fr.Element) -} - -func (p *sizedPool) put(ptr *fr.Element) { - p.stats.dump() - p.pool.Put(ptr) -} - -func NewPool(maxN ...int) (pool Pool) { - - sort.Ints(maxN) - pool = Pool{ - inUse: make(map[*fr.Element]inUseData), - subPools: make([]sizedPool, len(maxN)), - } - - for i := range pool.subPools { - subPool := &pool.subPools[i] - subPool.maxN = maxN[i] - subPool.pool = sync.Pool{ - New: func() interface{} { - subPool.stats.Allocated++ - return getDataPointer(make([]fr.Element, 0, subPool.maxN)) - }, - } - } - return -} - -func (p *Pool) findCorrespondingPool(n int) *sizedPool { - poolI := 0 - for poolI < len(p.subPools) && n > p.subPools[poolI].maxN { - poolI++ - } - return &p.subPools[poolI] // out of bounds error here would mean that n is too large -} - -func (p *Pool) Make(n int) []fr.Element { - pool := p.findCorrespondingPool(n) - ptr := pool.get(n) - p.addInUse(ptr, pool) - return unsafe.Slice(ptr, n) -} - -// Dump dumps a set of polynomials into the pool -func (p *Pool) Dump(slices ...[]fr.Element) { - for _, slice := range slices { - ptr := getDataPointer(slice) - if metadata, ok := p.inUse[ptr]; ok { - delete(p.inUse, ptr) - metadata.pool.put(ptr) - } else { - panic("attempting to dump a slice not created by the pool") - } - } -} - -func (p *Pool) addInUse(ptr *fr.Element, pool *sizedPool) { - pcs := make([]uintptr, 2) - n := runtime.Callers(3, pcs) - - if prevPcs, ok := p.inUse[ptr]; ok { // TODO: remove if unnecessary for security - panic(fmt.Errorf("re-allocated non-dumped slice, previously allocated at %v", runtime.CallersFrames(prevPcs.allocatedFor))) - } - p.inUse[ptr] = inUseData{ - allocatedFor: pcs[:n], - pool: pool, - } -} - -func printFrame(frame runtime.Frame) { - fmt.Printf("\t%s line %d, function %s\n", frame.File, frame.Line, frame.Function) -} - -func (p *Pool) printInUse() { - fmt.Println("slices never dumped allocated at:") - for _, pcs := range p.inUse { - fmt.Println("-------------------------") - - var frame runtime.Frame - frames := 
runtime.CallersFrames(pcs.allocatedFor) - more := true - for more { - frame, more = frames.Next() - printFrame(frame) - } - } -} - -type poolStats struct { - Used int - Allocated int - ReuseRate float64 - InUse int - GreatestNUsed int - SmallestNUsed int -} - -type poolsStats struct { - SubPools []poolStats - InUse int -} - -func (s *poolStats) maake(n int) { - s.Used++ - s.InUse++ - if n > s.GreatestNUsed { - s.GreatestNUsed = n - } - if s.SmallestNUsed == 0 || s.SmallestNUsed > n { - s.SmallestNUsed = n - } -} - -func (s *poolStats) dump() { - s.InUse-- -} - -func (s *poolStats) finalize() { - s.ReuseRate = float64(s.Used) / float64(s.Allocated) -} - -func getDataPointer(slice []fr.Element) *fr.Element { - header := (*reflect.SliceHeader)(unsafe.Pointer(&slice)) - return (*fr.Element)(unsafe.Pointer(header.Data)) -} - -func (p *Pool) PrintPoolStats() { - InUse := 0 - subStats := make([]poolStats, len(p.subPools)) - for i := range p.subPools { - subPool := &p.subPools[i] - subPool.stats.finalize() - subStats[i] = subPool.stats - InUse += subPool.stats.InUse - } - - poolsStats := poolsStats{ - SubPools: subStats, - InUse: InUse, - } - serialized, _ := json.MarshalIndent(poolsStats, "", " ") - fmt.Println(string(serialized)) - p.printInUse() -} - -func (p *Pool) Clone(slice []fr.Element) []fr.Element { - res := p.Make(len(slice)) - copy(res, slice) - return res -} diff --git a/tools/gnark/bn254/fr/sumcheck/sumcheck.go b/tools/gnark/bn254/fr/sumcheck/sumcheck.go deleted file mode 100644 index c7de59c1..00000000 --- a/tools/gnark/bn254/fr/sumcheck/sumcheck.go +++ /dev/null @@ -1,181 +0,0 @@ -// Copyright 2020 ConsenSys Software Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by consensys/gnark-crypto DO NOT EDIT - -package sumcheck - -import ( - "fmt" - "github.com/consensys/gnark-crypto/ecc/bn254/fr" - "github.com/consensys/gnark-crypto/ecc/bn254/fr/polynomial" - fiatshamir "github.com/consensys/gnark-crypto/fiat-shamir" - "strconv" -) - -// This does not make use of parallelism and represents polynomials as lists of coefficients -// It is currently geared towards arithmetic hashes. Once we have a more unified hash function interface, this can be generified. - -// Claims to a multi-sumcheck statement. i.e. one of the form ∑_{0≤i<2ⁿ} fⱼ(i) = cⱼ for 1 ≤ j ≤ m. -// Later evolving into a claim of the form gⱼ = ∑_{0≤i<2ⁿ⁻ʲ} g(r₁, r₂, ..., rⱼ₋₁, Xⱼ, i...) -type Claims interface { - Combine(a fr.Element) polynomial.Polynomial // Combine into the 0ᵗʰ sumcheck subclaim. Create g := ∑_{1≤j≤m} aʲ⁻¹fⱼ for which now we seek to prove ∑_{0≤i<2ⁿ} g(i) = c := ∑_{1≤j≤m} aʲ⁻¹cⱼ. Return g₁. - Next(fr.Element) polynomial.Polynomial // Return the evaluations gⱼ(k) for 1 ≤ k < degⱼ(g). 
Update the claim to gⱼ₊₁ for the input value as rⱼ - VarsNum() int //number of variables - ClaimsNum() int //number of claims - ProveFinalEval(r []fr.Element) interface{} //in case it is difficult for the verifier to compute g(r₁, ..., rₙ) on its own, the prover can provide the value and a proof -} - -// LazyClaims is the Claims data structure on the verifier side. It is "lazy" in that it has to compute fewer things. -type LazyClaims interface { - ClaimsNum() int // ClaimsNum = m - VarsNum() int // VarsNum = n - CombinedSum(a fr.Element) fr.Element // CombinedSum returns c = ∑_{1≤j≤m} aʲ⁻¹cⱼ - Degree(i int) int //Degree of the total claim in the i'th variable - VerifyFinalEval(r []fr.Element, combinationCoeff fr.Element, purportedValue fr.Element, proof interface{}) error -} - -// Proof of a multi-sumcheck statement. -type Proof struct { - PartialSumPolys []polynomial.Polynomial `json:"partialSumPolys"` - FinalEvalProof interface{} `json:"finalEvalProof"` //in case it is difficult for the verifier to compute g(r₁, ..., rₙ) on its own, the prover can provide the value and a proof -} - -func setupTranscript(claimsNum int, varsNum int, settings *fiatshamir.Settings) (challengeNames []string, err error) { - numChallenges := varsNum - if claimsNum >= 2 { - numChallenges++ - } - challengeNames = make([]string, numChallenges) - if claimsNum >= 2 { - challengeNames[0] = settings.Prefix + "comb" - } - prefix := settings.Prefix + "pSP." - for i := 0; i < varsNum; i++ { - challengeNames[i+numChallenges-varsNum] = prefix + strconv.Itoa(i) - } - if settings.Transcript == nil { - transcript := fiatshamir.NewTranscript(settings.Hash, challengeNames...) - settings.Transcript = &transcript - } - - for i := range settings.BaseChallenges { - if err = settings.Transcript.Bind(challengeNames[0], settings.BaseChallenges[i]); err != nil { - return - } - } - return -} - -func next(transcript *fiatshamir.Transcript, bindings []fr.Element, remainingChallengeNames *[]string) (fr.Element, error) { - challengeName := (*remainingChallengeNames)[0] - for i := range bindings { - bytes := bindings[i].Bytes() - if err := transcript.Bind(challengeName, bytes[:]); err != nil { - return fr.Element{}, err - } - } - var res fr.Element - bytes, err := transcript.ComputeChallenge(challengeName) - res.SetBytes(bytes) - - *remainingChallengeNames = (*remainingChallengeNames)[1:] - - return res, err -} - -// Prove create a non-interactive sumcheck proof -func Prove(claims Claims, transcriptSettings fiatshamir.Settings) (Proof, error) { - - var proof Proof - remainingChallengeNames, err := setupTranscript(claims.ClaimsNum(), claims.VarsNum(), &transcriptSettings) - transcript := transcriptSettings.Transcript - if err != nil { - return proof, err - } - - var combinationCoeff fr.Element - if claims.ClaimsNum() >= 2 { - if combinationCoeff, err = next(transcript, []fr.Element{}, &remainingChallengeNames); err != nil { - return proof, err - } - } - - varsNum := claims.VarsNum() - proof.PartialSumPolys = make([]polynomial.Polynomial, varsNum) - proof.PartialSumPolys[0] = claims.Combine(combinationCoeff) - challenges := make([]fr.Element, varsNum) - - for j := 0; j+1 < varsNum; j++ { - if challenges[j], err = next(transcript, proof.PartialSumPolys[j], &remainingChallengeNames); err != nil { - return proof, err - } - proof.PartialSumPolys[j+1] = claims.Next(challenges[j]) - } - - if challenges[varsNum-1], err = next(transcript, proof.PartialSumPolys[varsNum-1], &remainingChallengeNames); err != nil { - return proof, err - } - - 
proof.FinalEvalProof = claims.ProveFinalEval(challenges) - - return proof, nil -} - -func Verify(claims LazyClaims, proof Proof, transcriptSettings fiatshamir.Settings) error { - remainingChallengeNames, err := setupTranscript(claims.ClaimsNum(), claims.VarsNum(), &transcriptSettings) - transcript := transcriptSettings.Transcript - if err != nil { - return err - } - - var combinationCoeff fr.Element - - if claims.ClaimsNum() >= 2 { - if combinationCoeff, err = next(transcript, []fr.Element{}, &remainingChallengeNames); err != nil { - return err - } - } - - r := make([]fr.Element, claims.VarsNum()) - - // Just so that there is enough room for gJ to be reused - maxDegree := claims.Degree(0) - for j := 1; j < claims.VarsNum(); j++ { - if d := claims.Degree(j); d > maxDegree { - maxDegree = d - } - } - gJ := make(polynomial.Polynomial, maxDegree+1) //At the end of iteration j, gJ = ∑_{i < 2ⁿ⁻ʲ⁻¹} g(X₁, ..., Xⱼ₊₁, i...) NOTE: n is shorthand for claims.VarsNum() - gJR := claims.CombinedSum(combinationCoeff) // At the beginning of iteration j, gJR = ∑_{i < 2ⁿ⁻ʲ} g(r₁, ..., rⱼ, i...) - - for j := 0; j < claims.VarsNum(); j++ { - if len(proof.PartialSumPolys[j]) != claims.Degree(j) { - return fmt.Errorf("malformed proof") - } - copy(gJ[1:], proof.PartialSumPolys[j]) - gJ[0].Sub(&gJR, &proof.PartialSumPolys[j][0]) // Requirement that gⱼ(0) + gⱼ(1) = gⱼ₋₁(r) - // gJ is ready - - //Prepare for the next iteration - if r[j], err = next(transcript, proof.PartialSumPolys[j], &remainingChallengeNames); err != nil { - return err - } - // This is an extremely inefficient way of interpolating. TODO: Interpolate without symbolically computing a polynomial - gJCoeffs := polynomial.InterpolateOnRange(gJ[:(claims.Degree(j) + 1)]) - gJR = gJCoeffs.Eval(&r[j]) - } - - return claims.VerifyFinalEval(r, combinationCoeff, gJR, proof.FinalEvalProof) -} diff --git a/tools/gnark/bn254/fr/sumcheck/sumcheck_test.go b/tools/gnark/bn254/fr/sumcheck/sumcheck_test.go deleted file mode 100644 index d5d86b17..00000000 --- a/tools/gnark/bn254/fr/sumcheck/sumcheck_test.go +++ /dev/null @@ -1,161 +0,0 @@ -// Copyright 2020 ConsenSys Software Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
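// --- illustrative aside (not part of the deleted sources) ---------------------
// A minimal numeric sketch of the round invariant the Verify routine above relies
// on: the proof only carries gⱼ(1), ..., gⱼ(deg), and the verifier recovers gⱼ(0)
// from gⱼ(0) + gⱼ(1) = gⱼ₋₁(rⱼ₋₁), which in the first round is the claimed sum.
// The values mirror the {1, 2, 3, 4} test vector used below, i.e.
// g(X₁, X₂) = 1 + 2X₁ + X₂; the main() wrapper is only for the sketch and is not
// part of the sumcheck package's API.
package main

import (
	"fmt"

	"github.com/consensys/gnark-crypto/ecc/bn254/fr"
)

func main() {
	var g00, g01, g10, g11 fr.Element
	g00.SetUint64(1) // g(0,0)
	g01.SetUint64(2) // g(0,1)
	g10.SetUint64(3) // g(1,0)
	g11.SetUint64(4) // g(1,1)

	// Claimed sum c = Σ_{x ∈ {0,1}²} g(x) = 10.
	var c fr.Element
	c.Add(&g00, &g01).Add(&c, &g10).Add(&c, &g11)

	// First partial sum polynomial g₁(X₁) = Σ_{x₂} g(X₁, x₂);
	// the prover ships only g₁(1) = g(1,0) + g(1,1) = 7.
	var g1At1 fr.Element
	g1At1.Add(&g10, &g11)

	// The verifier reconstructs g₁(0) = c - g₁(1), which must equal Σ_{x₂} g(0, x₂) = 3.
	var g1At0, expected fr.Element
	g1At0.Sub(&c, &g1At1)
	expected.Add(&g00, &g01)

	fmt.Println(g1At0.Equal(&expected)) // true
}
// ------------------------------------------------------------------------------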
- -// Code generated by consensys/gnark-crypto DO NOT EDIT - -package sumcheck - -import ( - "fmt" - "github.com/consensys/gnark-crypto/ecc/bn254/fr" - "github.com/consensys/gnark-crypto/ecc/bn254/fr/polynomial" - "github.com/consensys/gnark-crypto/ecc/bn254/fr/test_vector_utils" - fiatshamir "github.com/consensys/gnark-crypto/fiat-shamir" - "github.com/stretchr/testify/assert" - "hash" - "math/bits" - "strings" - "testing" -) - -type singleMultilinClaim struct { - g polynomial.MultiLin -} - -func (c singleMultilinClaim) ProveFinalEval(r []fr.Element) interface{} { - return nil // verifier can compute the final eval itself -} - -func (c singleMultilinClaim) VarsNum() int { - return bits.TrailingZeros(uint(len(c.g))) -} - -func (c singleMultilinClaim) ClaimsNum() int { - return 1 -} - -func sumForX1One(g polynomial.MultiLin) polynomial.Polynomial { - sum := g[len(g)/2] - for i := len(g)/2 + 1; i < len(g); i++ { - sum.Add(&sum, &g[i]) - } - return []fr.Element{sum} -} - -func (c singleMultilinClaim) Combine(fr.Element) polynomial.Polynomial { - return sumForX1One(c.g) -} - -func (c *singleMultilinClaim) Next(r fr.Element) polynomial.Polynomial { - c.g.Fold(r) - return sumForX1One(c.g) -} - -type singleMultilinLazyClaim struct { - g polynomial.MultiLin - claimedSum fr.Element -} - -func (c singleMultilinLazyClaim) VerifyFinalEval(r []fr.Element, combinationCoeff fr.Element, purportedValue fr.Element, proof interface{}) error { - val := c.g.Evaluate(r, nil) - if val.Equal(&purportedValue) { - return nil - } - return fmt.Errorf("mismatch") -} - -func (c singleMultilinLazyClaim) CombinedSum(combinationCoeffs fr.Element) fr.Element { - return c.claimedSum -} - -func (c singleMultilinLazyClaim) Degree(i int) int { - return 1 -} - -func (c singleMultilinLazyClaim) ClaimsNum() int { - return 1 -} - -func (c singleMultilinLazyClaim) VarsNum() int { - return bits.TrailingZeros(uint(len(c.g))) -} - -func testSumcheckSingleClaimMultilin(polyInt []uint64, hashGenerator func() hash.Hash) error { - poly := make(polynomial.MultiLin, len(polyInt)) - for i, n := range polyInt { - poly[i].SetUint64(n) - } - - claim := singleMultilinClaim{g: poly.Clone()} - - proof, err := Prove(&claim, fiatshamir.WithHash(hashGenerator())) - if err != nil { - return err - } - - var sb strings.Builder - for _, p := range proof.PartialSumPolys { - - sb.WriteString("\t{") - for i := 0; i < len(p); i++ { - sb.WriteString(p[i].String()) - if i+1 < len(p) { - sb.WriteString(", ") - } - } - sb.WriteString("}\n") - } - - lazyClaim := singleMultilinLazyClaim{g: poly, claimedSum: poly.Sum()} - if err = Verify(lazyClaim, proof, fiatshamir.WithHash(hashGenerator())); err != nil { - return err - } - - proof.PartialSumPolys[0][0].Add(&proof.PartialSumPolys[0][0], test_vector_utils.ToElement(1)) - lazyClaim = singleMultilinLazyClaim{g: poly, claimedSum: poly.Sum()} - if Verify(lazyClaim, proof, fiatshamir.WithHash(hashGenerator())) == nil { - return fmt.Errorf("bad proof accepted") - } - return nil -} - -func TestSumcheckDeterministicHashSingleClaimMultilin(t *testing.T) { - //printMsws(36) - - polys := [][]uint64{ - {1, 2, 3, 4}, // 1 + 2X₁ + X₂ - {1, 2, 3, 4, 5, 6, 7, 8}, // 1 + 4X₁ + 2X₂ + X₃ - {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}, // 1 + 8X₁ + 4X₂ + 2X₃ + X₄ - } - - const MaxStep = 4 - const MaxStart = 4 - hashGens := make([]func() hash.Hash, 0, MaxStart*MaxStep) - - for step := 0; step < MaxStep; step++ { - for startState := 0; startState < MaxStart; startState++ { - if step == 0 && startState == 1 { // unlucky case 
where a bad proof would be accepted - continue - } - hashGens = append(hashGens, test_vector_utils.NewMessageCounterGenerator(startState, step)) - } - } - - for _, poly := range polys { - for _, hashGen := range hashGens { - assert.NoError(t, testSumcheckSingleClaimMultilin(poly, hashGen), - "failed with poly %v and hashGen %v", poly, hashGen()) - } - } -} diff --git a/tools/gnark/bn254/fr/test_vector_utils/test_vector_utils.go b/tools/gnark/bn254/fr/test_vector_utils/test_vector_utils.go deleted file mode 100644 index f39f6ae4..00000000 --- a/tools/gnark/bn254/fr/test_vector_utils/test_vector_utils.go +++ /dev/null @@ -1,429 +0,0 @@ -// Copyright 2020 ConsenSys Software Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by consensys/gnark-crypto DO NOT EDIT - -package test_vector_utils - -import ( - "encoding/json" - "fmt" - "github.com/consensys/gnark-crypto/ecc/bn254/fr" - "github.com/consensys/gnark-crypto/ecc/bn254/fr/polynomial" - "hash" - - "os" - "path/filepath" - "reflect" - "sort" - "strconv" - "strings" -) - -type ElementTriplet struct { - key1 fr.Element - key2 fr.Element - key2Present bool - value fr.Element - used bool -} - -func (t *ElementTriplet) CmpKey(o *ElementTriplet) int { - if cmp1 := t.key1.Cmp(&o.key1); cmp1 != 0 { - return cmp1 - } - - if t.key2Present { - if o.key2Present { - return t.key2.Cmp(&o.key2) - } - return 1 - } else { - if o.key2Present { - return -1 - } - return 0 - } -} - -var MapCache = make(map[string]*ElementMap) - -func ElementMapFromFile(path string) (*ElementMap, error) { - path, err := filepath.Abs(path) - if err != nil { - return nil, err - } - if h, ok := MapCache[path]; ok { - return h, nil - } - var bytes []byte - if bytes, err = os.ReadFile(path); err == nil { - var asMap map[string]interface{} - if err = json.Unmarshal(bytes, &asMap); err != nil { - return nil, err - } - - var h ElementMap - if h, err = CreateElementMap(asMap); err == nil { - MapCache[path] = &h - } - - return &h, err - - } else { - return nil, err - } -} - -func CreateElementMap(rawMap map[string]interface{}) (ElementMap, error) { - res := make(ElementMap, 0, len(rawMap)) - - for k, v := range rawMap { - var entry ElementTriplet - if _, err := SetElement(&entry.value, v); err != nil { - return nil, err - } - - key := strings.Split(k, ",") - switch len(key) { - case 1: - entry.key2Present = false - case 2: - entry.key2Present = true - if _, err := SetElement(&entry.key2, key[1]); err != nil { - return nil, err - } - default: - return nil, fmt.Errorf("cannot parse %T as one or two field elements", v) - } - if _, err := SetElement(&entry.key1, key[0]); err != nil { - return nil, err - } - - res = append(res, &entry) - } - - res.sort() - return res, nil -} - -type ElementMap []*ElementTriplet - -type MapHash struct { - Map *ElementMap - state fr.Element - stateValid bool -} - -func min(a, b int) int { - if a < b { - return a - } - return b -} - -func (m *MapHash) Write(p []byte) (n int, err error) { - var x fr.Element - for i := 0; i < len(p); 
i += fr.Bytes { - x.SetBytes(p[i:min(len(p), i+fr.Bytes)]) - if err = m.write(x); err != nil { - return - } - } - n = len(p) - return -} - -func (m *MapHash) Sum(b []byte) []byte { - mP := *m - if _, err := mP.Write(b); err != nil { - panic(err) - } - bytes := mP.state.Bytes() - return bytes[:] -} - -func (m *MapHash) Reset() { - m.stateValid = false -} - -func (m *MapHash) Size() int { - return fr.Bytes -} - -func (m *MapHash) BlockSize() int { - return fr.Bytes -} - -func (m *MapHash) write(x fr.Element) error { - X := &x - Y := &m.state - if !m.stateValid { - Y = nil - } - var err error - if m.state, err = m.Map.FindPair(X, Y); err == nil { - m.stateValid = true - } - return err -} - -func (t *ElementTriplet) writeKey(sb *strings.Builder) { - sb.WriteRune('"') - sb.WriteString(t.key1.String()) - if t.key2Present { - sb.WriteRune(',') - sb.WriteString(t.key2.String()) - } - sb.WriteRune('"') -} -func (m *ElementMap) UnusedEntries() []interface{} { - unused := make([]interface{}, 0) - for _, v := range *m { - if !v.used { - var vInterface interface{} - if v.key2Present { - vInterface = []interface{}{ElementToInterface(&v.key1), ElementToInterface(&v.key2)} - } else { - vInterface = ElementToInterface(&v.key1) - } - unused = append(unused, vInterface) - } - } - return unused -} - -func (m *ElementMap) sort() { - sort.Slice(*m, func(i, j int) bool { - return (*m)[i].CmpKey((*m)[j]) <= 0 - }) -} - -func (m *ElementMap) find(toFind *ElementTriplet) (fr.Element, error) { - i := sort.Search(len(*m), func(i int) bool { return (*m)[i].CmpKey(toFind) >= 0 }) - - if i < len(*m) && (*m)[i].CmpKey(toFind) == 0 { - (*m)[i].used = true - return (*m)[i].value, nil - } - var sb strings.Builder - sb.WriteString("no value available for input ") - toFind.writeKey(&sb) - return fr.Element{}, fmt.Errorf(sb.String()) -} - -func (m *ElementMap) FindPair(x *fr.Element, y *fr.Element) (fr.Element, error) { - - toFind := ElementTriplet{ - key1: *x, - key2Present: y != nil, - } - - if y != nil { - toFind.key2 = *y - } - - return m.find(&toFind) -} - -func ToElement(i int64) *fr.Element { - var res fr.Element - res.SetInt64(i) - return &res -} - -type MessageCounter struct { - startState uint64 - state uint64 - step uint64 -} - -func (m *MessageCounter) Write(p []byte) (n int, err error) { - inputBlockSize := (len(p)-1)/fr.Bytes + 1 - m.state += uint64(inputBlockSize) * m.step - return len(p), nil -} - -func (m *MessageCounter) Sum(b []byte) []byte { - inputBlockSize := (len(b)-1)/fr.Bytes + 1 - resI := m.state + uint64(inputBlockSize)*m.step - var res fr.Element - res.SetInt64(int64(resI)) - resBytes := res.Bytes() - return resBytes[:] -} - -func (m *MessageCounter) Reset() { - m.state = m.startState -} - -func (m *MessageCounter) Size() int { - return fr.Bytes -} - -func (m *MessageCounter) BlockSize() int { - return fr.Bytes -} - -func NewMessageCounter(startState, step int) hash.Hash { - transcript := &MessageCounter{startState: uint64(startState), state: uint64(startState), step: uint64(step)} - return transcript -} - -func NewMessageCounterGenerator(startState, step int) func() hash.Hash { - return func() hash.Hash { - return NewMessageCounter(startState, step) - } -} - -type ListHash []fr.Element - -func (h *ListHash) Write(p []byte) (n int, err error) { - return len(p), nil -} - -func (h *ListHash) Sum(b []byte) []byte { - res := (*h)[0].Bytes() - *h = (*h)[1:] - return res[:] -} - -func (h *ListHash) Reset() { -} - -func (h *ListHash) Size() int { - return fr.Bytes -} - -func (h *ListHash) BlockSize() int { 
- return fr.Bytes -} -func SetElement(z *fr.Element, value interface{}) (*fr.Element, error) { - - // TODO: Put this in element.SetString? - switch v := value.(type) { - case string: - - if sep := strings.Split(v, "/"); len(sep) == 2 { - var denom fr.Element - if _, err := z.SetString(sep[0]); err != nil { - return nil, err - } - if _, err := denom.SetString(sep[1]); err != nil { - return nil, err - } - denom.Inverse(&denom) - z.Mul(z, &denom) - return z, nil - } - - case float64: - asInt := int64(v) - if float64(asInt) != v { - return nil, fmt.Errorf("cannot currently parse float") - } - z.SetInt64(asInt) - return z, nil - } - - return z.SetInterface(value) -} - -func SliceToElementSlice[T any](slice []T) ([]fr.Element, error) { - elementSlice := make([]fr.Element, len(slice)) - for i, v := range slice { - if _, err := SetElement(&elementSlice[i], v); err != nil { - return nil, err - } - } - return elementSlice, nil -} - -func SliceEquals(a []fr.Element, b []fr.Element) error { - if len(a) != len(b) { - return fmt.Errorf("length mismatch %d≠%d", len(a), len(b)) - } - for i := range a { - if !a[i].Equal(&b[i]) { - return fmt.Errorf("at index %d: %s ≠ %s", i, a[i].String(), b[i].String()) - } - } - return nil -} - -func SliceSliceEquals(a [][]fr.Element, b [][]fr.Element) error { - if len(a) != len(b) { - return fmt.Errorf("length mismatch %d≠%d", len(a), len(b)) - } - for i := range a { - if err := SliceEquals(a[i], b[i]); err != nil { - return fmt.Errorf("at index %d: %w", i, err) - } - } - return nil -} - -func PolynomialSliceEquals(a []polynomial.Polynomial, b []polynomial.Polynomial) error { - if len(a) != len(b) { - return fmt.Errorf("length mismatch %d≠%d", len(a), len(b)) - } - for i := range a { - if err := SliceEquals(a[i], b[i]); err != nil { - return fmt.Errorf("at index %d: %w", i, err) - } - } - return nil -} - -func ElementToInterface(x *fr.Element) interface{} { - text := x.Text(10) - if len(text) < 10 && !strings.Contains(text, "/") { - if i, err := strconv.Atoi(text); err != nil { - panic(err.Error()) - } else { - return i - } - } - return text -} - -func ElementSliceToInterfaceSlice(x interface{}) []interface{} { - if x == nil { - return nil - } - - X := reflect.ValueOf(x) - - res := make([]interface{}, X.Len()) - for i := range res { - xI := X.Index(i).Interface().(fr.Element) - res[i] = ElementToInterface(&xI) - } - return res -} - -func ElementSliceSliceToInterfaceSliceSlice(x interface{}) [][]interface{} { - if x == nil { - return nil - } - - X := reflect.ValueOf(x) - - res := make([][]interface{}, X.Len()) - for i := range res { - res[i] = ElementSliceToInterfaceSlice(X.Index(i).Interface()) - } - - return res -} diff --git a/tools/gnark/bn254/fr/test_vector_utils/test_vector_utils_test.go b/tools/gnark/bn254/fr/test_vector_utils/test_vector_utils_test.go deleted file mode 100644 index 9ecbab36..00000000 --- a/tools/gnark/bn254/fr/test_vector_utils/test_vector_utils_test.go +++ /dev/null @@ -1,44 +0,0 @@ -package test_vector_utils - -import ( - "github.com/consensys/gnark-crypto/ecc/bn254/fr" - fiatshamir "github.com/consensys/gnark-crypto/fiat-shamir" - "github.com/consensys/gnark-crypto/internal/generator/test_vector_utils/small_rational/test_vector_utils" - "github.com/stretchr/testify/assert" - "strconv" - "testing" -) - -func TestTranscript(t *testing.T) { - - mp, err := CreateElementMap(map[string]interface{}{ - strconv.Itoa('0'): 2, - "3,2": 5, - }) - assert.NoError(t, err) - - hsh := MapHash{Map: &mp} - transcript := fiatshamir.NewTranscript(&hsh, "0", "1") - 
bytes := ToElement(3).Bytes() - err = transcript.Bind("0", bytes[:]) - assert.NoError(t, err) - var cBytes []byte - cBytes, err = transcript.ComputeChallenge("0") - assert.NoError(t, err) - var res fr.Element - res.SetBytes(cBytes) - assert.True(t, ToElement(5).Equal(&res)) -} - -func TestCounterTranscriptInequality(t *testing.T) { - const challengeName = "fC.0" - t1 := fiatshamir.NewTranscript(test_vector_utils.NewMessageCounter(1, 1), challengeName) - t2 := fiatshamir.NewTranscript(test_vector_utils.NewMessageCounter(0, 1), challengeName) - var c1, c2 []byte - var err error - c1, err = t1.ComputeChallenge(challengeName) - assert.NoError(t, err) - c2, err = t2.ComputeChallenge(challengeName) - assert.NoError(t, err) - assert.NotEqual(t, c1, c2) -}
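The deleted test_vector_utils helpers replace a real hash with a counter so that Fiat-Shamir challenges become deterministic test vectors. Below is a minimal usage sketch, assuming the ecc/bn254/fr/test_vector_utils package imported by the deleted sumcheck test is available; the package main wrapper is illustrative only. It shows the flip side of TestCounterTranscriptInequality: two transcripts built from MessageCounters with the same (startState, step) produce identical challenges, which is exactly why the test picks different start states to obtain distinct ones.

package main

import (
	"bytes"
	"fmt"

	"github.com/consensys/gnark-crypto/ecc/bn254/fr/test_vector_utils"
	fiatshamir "github.com/consensys/gnark-crypto/fiat-shamir"
)

func main() {
	const challengeName = "fC.0"

	// Same start state and step ⇒ the counter-based "hash" evolves identically.
	t1 := fiatshamir.NewTranscript(test_vector_utils.NewMessageCounter(1, 1), challengeName)
	t2 := fiatshamir.NewTranscript(test_vector_utils.NewMessageCounter(1, 1), challengeName)

	c1, err1 := t1.ComputeChallenge(challengeName)
	c2, err2 := t2.ComputeChallenge(challengeName)
	if err1 != nil || err2 != nil {
		panic("unexpected transcript error")
	}

	fmt.Println(bytes.Equal(c1, c2)) // true: the counter hash is deterministic
}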