// File: poly_commitment/ipa.rs
1//! IPA polynomial commitment scheme.
2//!
3//! This module contains the implementation of the polynomial commitment scheme
4//! called the Inner Product Argument (IPA) as described in [Efficient
5//! Zero-Knowledge Arguments for Arithmetic Circuits in the Discrete Log
6//! Setting](https://eprint.iacr.org/2016/263).
7
8use crate::{
9 commitment::{
10 b_poly, b_poly_coefficients, combine_commitments, shift_scalar, squeeze_challenge,
11 squeeze_prechallenge, BatchEvaluationProof, CommitmentCurve, EndoCurve,
12 },
13 error::CommitmentError,
14 hash_map_cache::HashMapCache,
15 utils::combine_polys,
16 BlindedCommitment, PolyComm, PolynomialsToCombine, SRS as SRSTrait,
17};
18use ark_ec::{AffineRepr, CurveGroup, VariableBaseMSM};
19use ark_ff::{BigInteger, Field, One, PrimeField, UniformRand, Zero};
20use ark_poly::{
21 univariate::DensePolynomial, EvaluationDomain, Evaluations, Radix2EvaluationDomain as D,
22};
23use ark_serialize::{CanonicalDeserialize, CanonicalSerialize};
24use blake2::{Blake2b512, Digest};
25use groupmap::GroupMap;
26use mina_poseidon::{sponge::ScalarChallenge, FqSponge};
27use o1_utils::{
28 field_helpers::{inner_prod, pows},
29 math,
30};
31use rand::{CryptoRng, RngCore};
32use rayon::prelude::*;
33use serde::{Deserialize, Serialize};
34use serde_with::serde_as;
35use std::{cmp::min, iter::Iterator, ops::AddAssign};
36use zeroize::Zeroize;
37
/// The "Structured" Reference String for the IPA commitment scheme: a list of
/// curve points `g` used as commitment bases, plus a separate generator `h`
/// used for blinding. Unlike pairing-based SRS, these points are sampled
/// transparently (by hashing to the curve), so no trusted setup is required
/// in the non-`create_trusted_setup_with_toxic_waste` constructors.
#[serde_as]
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
#[serde(bound = "G: CanonicalDeserialize + CanonicalSerialize")]
pub struct SRS<G> {
    /// The vector of group elements for committing to polynomials in
    /// coefficient form.
    #[serde_as(as = "Vec<o1_utils::serialization::SerdeAs>")]
    pub g: Vec<G>,

    /// A group element used for blinding commitments
    #[serde_as(as = "o1_utils::serialization::SerdeAs")]
    pub h: G,

    // TODO: the following field should be separated, as they are optimization
    // values
    /// Commitments to Lagrange bases, per domain size.
    ///
    /// NOTE: this is a derived cache: it is skipped by serde, excluded from
    /// `PartialEq`, and recomputed on demand via `get_lagrange_basis`.
    #[serde(skip)]
    pub lagrange_bases: HashMapCache<usize, Vec<PolyComm<G>>>,
}
57
58impl<G> PartialEq for SRS<G>
59where
60 G: PartialEq,
61{
62 fn eq(&self, other: &Self) -> bool {
63 self.g == other.g && self.h == other.h
64 }
65}
66
67/// Computes the endomorphism coefficients (ξ, λ) for a curve.
68///
69/// For curves of the form y² = x³ + b (like Pallas and Vesta), there exists
70/// an efficient endomorphism φ defined by:
71///
72/// φ(x, y) = (ξ · x, y)
73///
74/// where ξ is a primitive cube root of unity in the base field (ξ³ = 1, ξ ≠ 1).
75/// This works because (ξx)³ = ξ³x³ = x³, so the point remains on the curve.
76///
77/// This endomorphism corresponds to scalar multiplication by λ:
78///
79/// φ(P) = \[λ\]P
80///
81/// where λ is a primitive cube root of unity in the scalar field.
82///
83/// # Returns
84///
85/// A tuple (`endo_q`, `endo_r`) where:
86/// - `endo_q` (ξ): cube root of unity in the base field `F_q`, used to compute φ(P)
87/// - `endo_r` (λ): the corresponding scalar in `F_r` such that φ(P) = \[λ\]P
88///
89/// # Mathematical Background
90///
91/// The cube root is computed as ξ = g^((p-1)/3) where g is a generator of `F_p*`.
92/// By Fermat's Little Theorem, ξ³ = g^(p-1) = 1.
93///
94/// Since there are two primitive cube roots of unity (ξ and ξ²), the function
95/// verifies which one corresponds to the endomorphism by checking:
96///
97/// \[`potential_λ`\]G == φ(G)
98///
99/// If not, it uses λ = `potential_λ²` instead.
100///
101/// # Panics
102///
103/// Panics if the generator point coordinates cannot be extracted.
104///
105/// # References
106///
107/// - Halo paper, Section 6.2: <https://eprint.iacr.org/2019/1021>
108/// - GLV method for fast scalar multiplication
109#[must_use]
110pub fn endos<G: CommitmentCurve>() -> (G::BaseField, G::ScalarField)
111where
112 G::BaseField: PrimeField,
113{
114 let endo_q: G::BaseField = mina_poseidon::sponge::endo_coefficient();
115 let endo_r = {
116 let potential_endo_r: G::ScalarField = mina_poseidon::sponge::endo_coefficient();
117 let t = G::generator();
118 let (x, y) = t.to_coordinates().unwrap();
119 let phi_t = G::of_coordinates(x * endo_q, y);
120 if t.mul(potential_endo_r) == phi_t.into_group() {
121 potential_endo_r
122 } else {
123 potential_endo_r * potential_endo_r
124 }
125 };
126 (endo_q, endo_r)
127}
128
/// Deterministically maps `random_bytes` to a curve point in the prime-order
/// subgroup.
///
/// For each of the `extension_degree` base-prime-field coordinates, `N = 31`
/// bytes of input are consumed (so `random_bytes` must hold at least
/// `31 * extension_degree` bytes, e.g. a Blake2b-512 digest for degrees up
/// to 2). 31 bytes form a 248-bit integer, which is below the modulus of the
/// supported base fields, so `from_bigint` cannot fail for a correct packing.
/// The resulting field element is sent to the curve via the group map, and
/// the cofactor is cleared.
fn point_of_random_bytes<G: CommitmentCurve>(map: &G::Map, random_bytes: &[u8]) -> G
where
    G::BaseField: Field,
{
    // packing in bit-representation
    const N: usize = 31;
    #[allow(clippy::cast_possible_truncation)]
    let extension_degree = G::BaseField::extension_degree() as usize;

    // NOTE(review): only `extension_degree` elements are pushed below, so
    // this capacity is larger than needed.
    let mut base_fields = Vec::with_capacity(N * extension_degree);

    for base_count in 0..extension_degree {
        let mut bits = [false; 8 * N];
        let offset = base_count * N;
        for i in 0..N {
            for j in 0..8 {
                // Bit j (least-significant-first within each byte) of byte
                // `offset + i`; the resulting bit string is then read
                // big-endian by `from_bits_be`. This custom ordering is part
                // of the format — do not change it.
                bits[8 * i + j] = (random_bytes[offset + i] >> j) & 1 == 1;
            }
        }

        let n =
            <<G::BaseField as Field>::BasePrimeField as PrimeField>::BigInt::from_bits_be(&bits);
        let t = <<G::BaseField as Field>::BasePrimeField as PrimeField>::from_bigint(n)
            .expect("packing code has a bug");
        base_fields.push(t);
    }

    let t = G::BaseField::from_base_prime_field_elems(base_fields).unwrap();

    // Map the field element to a curve point, then multiply by the cofactor
    // so the result lies in the prime-order subgroup.
    let (x, y) = map.to_group(t);
    G::of_coordinates(x, y).mul_by_cofactor()
}
161
/// Additional methods for the SRS structure
impl<G: CommitmentCurve> SRS<G> {
    /// Verifies a batch of polynomial commitment opening proofs.
    ///
    /// This IPA verification method is primarily designed for integration into
    /// recursive proof systems like Pickles. In recursive proofs, IPA verification
    /// is deferred by storing accumulators (`RecursionChallenge`)
    /// rather than verifying immediately in-circuit. This method performs the final
    /// out-of-circuit verification of those accumulators.
    ///
    /// The function reconstructs the **challenge polynomial** `b(X)` from the IPA
    /// challenges and uses it to verify the opening proofs. The challenge polynomial
    /// is defined as:
    /// ```text
    /// b(X) = prod_{i=0}^{k-1} (1 + u_{k-i} * X^{2^i})
    /// ```
    /// where `u_1, ..., u_k` are the challenges from the `k` rounds of the IPA protocol.
    /// See Section 3.2 of the [Halo paper](https://eprint.iacr.org/2019/1021.pdf).
    ///
    /// The verification reconstructs `b(X)` in two forms:
    /// - **Evaluation form**: `b_poly(&chal, x)` computes `b(x)` in `O(k)` operations
    /// - **Coefficient form**: `b_poly_coefficients(&chal)` returns the `2^k` coefficients
    ///   for the MSM `<s, G>` that verifies the accumulated commitment
    ///
    /// Note: The challenge polynomial reconstruction is specifically needed for recursive
    /// proof verification. For standalone (non-recursive) IPA verification, a simpler
    /// approach without `b(X)` reconstruction could be used.
    ///
    /// # Panics
    ///
    /// Panics if the number of scalars does not match the number of points.
    ///
    /// Returns `true` if verification succeeds, `false` otherwise.
    pub fn verify<EFqSponge, RNG, const FULL_ROUNDS: usize>(
        &self,
        group_map: &G::Map,
        batch: &mut [BatchEvaluationProof<
            G,
            EFqSponge,
            OpeningProof<G, FULL_ROUNDS>,
            FULL_ROUNDS,
        >],
        rng: &mut RNG,
    ) -> bool
    where
        EFqSponge: FqSponge<G::BaseField, G, G::ScalarField, FULL_ROUNDS>,
        RNG: RngCore + CryptoRng,
        G::BaseField: PrimeField,
    {
        // Verifier checks for all i,
        // c_i Q_i + delta_i = z1_i (G_i + b_i U_i) + z2_i H
        //
        // if we sample evalscale at random, it suffices to check
        //
        // 0 == sum_i evalscale^i (c_i Q_i + delta_i - ( z1_i (G_i + b_i U_i) + z2_i H ))
        //
        // and because each G_i is a multiexp on the same array self.g, we
        // can batch the multiexp across proofs.
        //
        // So for each proof in the batch, we add onto our big multiexp the
        // following terms
        // evalscale^i c_i Q_i
        // evalscale^i delta_i
        // - (evalscale^i z1_i) G_i
        // - (evalscale^i z2_i) H
        // - (evalscale^i z1_i b_i) U_i

        // We also check that the sg component of the proof is equal to the
        // polynomial commitment to the "s" array

        let nonzero_length = self.g.len();

        let max_rounds = math::ceil_log2(nonzero_length);

        let padded_length = 1 << max_rounds;

        let (_, endo_r) = endos::<G>();

        // TODO: This will need adjusting
        let padding = padded_length - nonzero_length;
        // Layout of the big MSM: index 0 holds H; indices 1..=padded_length
        // hold the (zero-padded) SRS bases; per-proof points are appended
        // after that inside the loop below.
        let mut points = vec![self.h];
        points.extend(self.g.clone());
        points.extend(vec![G::zero(); padding]);

        let mut scalars = vec![G::ScalarField::zero(); padded_length + 1];
        assert_eq!(scalars.len(), points.len());

        // sample randomiser to scale the proofs with
        let rand_base = G::ScalarField::rand(rng);
        let sg_rand_base = G::ScalarField::rand(rng);

        // Powers of the randomisers; advanced once per proof so that each
        // proof's contribution is independently scaled.
        let mut rand_base_i = G::ScalarField::one();
        let mut sg_rand_base_i = G::ScalarField::one();

        for BatchEvaluationProof {
            sponge,
            evaluation_points,
            polyscale,
            evalscale,
            evaluations,
            opening,
            combined_inner_product,
        } in batch.iter_mut()
        {
            // The transcript must mirror the prover's exactly: absorb the
            // shifted combined inner product before squeezing U (see `open`).
            sponge.absorb_fr(&[shift_scalar::<G>(*combined_inner_product)]);

            let u_base: G = {
                let t = sponge.challenge_fq();
                let (x, y) = group_map.to_group(t);
                G::of_coordinates(x, y)
            };

            // Replay the per-round folding challenges from the proof's L/R pairs.
            let Challenges { chal, chal_inv } = opening.challenges::<EFqSponge>(&endo_r, sponge);

            sponge.absorb_g(&[opening.delta]);
            let c = ScalarChallenge::new(sponge.challenge()).to_field(&endo_r);

            // Evaluate the challenge polynomial b(X) at each evaluation point and combine.
            // This computes: b0 = sum_i evalscale^i * b(evaluation_point[i])
            // where b(X) = prod_{j=0}^{k-1} (1 + u_{k-j} * X^{2^j}) is the challenge polynomial
            // reconstructed from the IPA challenges `chal`.
            let b0 = {
                let mut scale = G::ScalarField::one();
                let mut res = G::ScalarField::zero();
                for &e in evaluation_points.iter() {
                    let term = b_poly(&chal, e);
                    res += &(scale * term);
                    scale *= *evalscale;
                }
                res
            };

            // Compute the 2^k coefficients of the challenge polynomial b(X).
            // These are used in the MSM <s, G> to verify the accumulated commitment.
            let s = b_poly_coefficients(&chal);

            let neg_rand_base_i = -rand_base_i;

            // TERM
            // - rand_base_i z1 G
            //
            // we also add -sg_rand_base_i * G to check correctness of sg.
            points.push(opening.sg);
            scalars.push(neg_rand_base_i * opening.z1 - sg_rand_base_i);

            // Here we add
            // sg_rand_base_i * ( < s, self.g > )
            // =
            // < sg_rand_base_i s, self.g >
            //
            // to check correctness of the sg component.
            {
                let terms: Vec<_> = s.par_iter().map(|s| sg_rand_base_i * s).collect();

                // Offset by 1: scalars[0] belongs to H; the SRS bases start
                // at index 1 (see the layout comment above).
                for (i, term) in terms.iter().enumerate() {
                    scalars[i + 1] += term;
                }
            }

            // TERM
            // - rand_base_i * z2 * H
            scalars[0] -= &(rand_base_i * opening.z2);

            // TERM
            // -rand_base_i * (z1 * b0 * U)
            scalars.push(neg_rand_base_i * (opening.z1 * b0));
            points.push(u_base);

            // TERM
            // rand_base_i c_i Q_i
            // = rand_base_i c_i
            // (sum_j (chal_invs[j] L_j + chals[j] R_j) + P_prime)
            // where P_prime = combined commitment + combined_inner_product * U
            let rand_base_i_c_i = c * rand_base_i;
            for ((l, r), (u_inv, u)) in opening.lr.iter().zip(chal_inv.iter().zip(chal.iter())) {
                points.push(*l);
                scalars.push(rand_base_i_c_i * u_inv);

                points.push(*r);
                scalars.push(rand_base_i_c_i * u);
            }

            // TERM
            // sum_j evalscale^j (sum_i polyscale^i f_i) (elm_j)
            // == sum_j sum_i evalscale^j polyscale^i f_i(elm_j)
            // == sum_i polyscale^i sum_j evalscale^j f_i(elm_j)
            combine_commitments(
                evaluations,
                &mut scalars,
                &mut points,
                *polyscale,
                rand_base_i_c_i,
            );

            scalars.push(rand_base_i_c_i * *combined_inner_product);
            points.push(u_base);

            scalars.push(rand_base_i);
            points.push(opening.delta);

            // Advance the per-proof randomiser powers.
            rand_base_i *= &rand_base;
            sg_rand_base_i *= &sg_rand_base;
        }

        // Verify the equation in two chunks, which is optimal for our SRS size.
        // (see the comment to the `benchmark_msm_parallel_vesta` MSM benchmark)
        let chunk_size = points.len() / 2;
        let msm_res = points
            .into_par_iter()
            .chunks(chunk_size)
            .zip(scalars.into_par_iter().chunks(chunk_size))
            .map(|(bases, coeffs)| {
                let coeffs_bigint = coeffs
                    .into_iter()
                    .map(ark_ff::PrimeField::into_bigint)
                    .collect::<Vec<_>>();
                G::Group::msm_bigint(&bases, &coeffs_bigint)
            })
            .reduce(G::Group::zero, |mut l, r| {
                l += r;
                l
            });

        // The whole batched, randomised equation must sum to the identity.
        msm_res == G::Group::zero()
    }

    /// This function creates a trusted-setup SRS instance for circuits with
    /// number of rows up to `depth`.
    ///
    /// The basis consists of the powers of the secret: `g[i] = [x^i] G` where
    /// `G` is the curve generator.
    ///
    /// # Security
    ///
    /// The internal accumulator `x_pow` is zeroized before returning.
    /// The caller must ensure that `x` is securely zeroized after use.
    /// Leaking `x` compromises the soundness of the proof system.
    pub fn create_trusted_setup_with_toxic_waste(x: G::ScalarField, depth: usize) -> Self {
        let m = G::Map::setup();

        let mut x_pow = G::ScalarField::one();
        let g: Vec<_> = (0..depth)
            .map(|_| {
                let res = G::generator().mul(x_pow);
                x_pow *= x;
                res.into_affine()
            })
            .collect();

        // Zeroize internal accumulator derived from toxic waste
        x_pow.zeroize();

        // Compute a blinder
        let h = {
            let mut h = Blake2b512::new();
            h.update(b"srs_misc");
            // FIXME: This is for retrocompatibility with a previous version
            // that was using a list initialisation. It is not necessary.
            h.update(0_u32.to_be_bytes());
            point_of_random_bytes(&m, &h.finalize())
        };

        Self {
            g,
            h,
            lagrange_bases: HashMapCache::new(),
        }
    }
}
428
429impl<G: CommitmentCurve> SRS<G>
430where
431 <G as CommitmentCurve>::Map: Sync,
432 G::BaseField: PrimeField,
433{
434 /// This function creates SRS instance for circuits with number of rows up
435 /// to `depth`.
436 ///
437 /// # Panics
438 ///
439 /// Panics if `depth` exceeds `u32::MAX`.
440 #[must_use]
441 pub fn create_parallel(depth: usize) -> Self {
442 let m = G::Map::setup();
443
444 let g: Vec<_> = (0..depth)
445 .into_par_iter()
446 .map(|i| {
447 let mut h = Blake2b512::new();
448 #[allow(clippy::cast_possible_truncation)]
449 h.update((i as u32).to_be_bytes());
450 point_of_random_bytes(&m, &h.finalize())
451 })
452 .collect();
453
454 // Compute a blinder
455 let h = {
456 let mut h = Blake2b512::new();
457 h.update(b"srs_misc");
458 // FIXME: This is for retrocompatibility with a previous version
459 // that was using a list initialisation. It is not necessary.
460 h.update(0_u32.to_be_bytes());
461 point_of_random_bytes(&m, &h.finalize())
462 };
463
464 Self {
465 g,
466 h,
467 lagrange_bases: HashMapCache::new(),
468 }
469 }
470}
471
impl<G> SRSTrait<G> for SRS<G>
where
    G: CommitmentCurve,
{
    /// The maximum polynomial degree that can be committed to
    fn max_poly_size(&self) -> usize {
        self.g.len()
    }

    /// The group element used to blind commitments (`h`).
    fn blinding_commitment(&self) -> G {
        self.h
    }

    /// Turns a non-hiding polynomial commitment into a hiding polynomial
    /// commitment. Transforms each given `<a, G>` into `(<a, G> + wH, w)` with
    /// a random `w` per commitment.
    fn mask(
        &self,
        comm: PolyComm<G>,
        rng: &mut (impl RngCore + CryptoRng),
    ) -> BlindedCommitment<G> {
        let blinders = comm.map(|_| G::ScalarField::rand(rng));
        // `unwrap` is safe: the blinders are built from `comm` itself, so
        // their lengths necessarily match.
        self.mask_custom(comm, &blinders).unwrap()
    }

    /// Same as `mask`, but with caller-provided blinders.
    ///
    /// Returns `CommitmentError::BlindersDontMatch` when the number of
    /// blinders differs from the number of commitment chunks.
    fn mask_custom(
        &self,
        com: PolyComm<G>,
        blinders: &PolyComm<G::ScalarField>,
    ) -> Result<BlindedCommitment<G>, CommitmentError> {
        let commitment = com
            .zip(blinders)
            .ok_or_else(|| CommitmentError::BlindersDontMatch(blinders.len(), com.len()))?
            .map(|(g, b)| {
                // g + [b] H
                let mut g_masked = self.h.mul(b);
                g_masked.add_assign(&g);
                g_masked.into_affine()
            });
        Ok(BlindedCommitment {
            commitment,
            blinders: blinders.clone(),
        })
    }

    /// Commits (without hiding) to a polynomial in coefficient form, splitting
    /// it into chunks of at most `self.g.len()` coefficients, and padding the
    /// result with zero commitments up to `num_chunks`.
    fn commit_non_hiding(
        &self,
        plnm: &DensePolynomial<G::ScalarField>,
        num_chunks: usize,
    ) -> PolyComm<G> {
        let is_zero = plnm.is_zero();

        // chunk while committing
        let mut chunks: Vec<_> = if is_zero {
            vec![G::zero()]
        } else if plnm.len() < self.g.len() {
            // Single chunk smaller than the SRS: one plain MSM.
            vec![G::Group::msm(&self.g[..plnm.len()], &plnm.coeffs)
                .unwrap()
                .into_affine()]
        } else if plnm.len() == self.g.len() {
            // when processing a single chunk, it's faster to parallelise
            // vertically in 2 threads (see the comment to the
            // `benchmark_msm_parallel_vesta` MSM benchmark)
            let n = self.g.len();
            let (r1, r2) = rayon::join(
                || G::Group::msm(&self.g[..n / 2], &plnm.coeffs[..n / 2]).unwrap(),
                || G::Group::msm(&self.g[n / 2..n], &plnm.coeffs[n / 2..n]).unwrap(),
            );

            vec![(r1 + r2).into_affine()]
        } else {
            // otherwise it's better to parallelise horizontally along chunks
            plnm.into_par_iter()
                .chunks(self.g.len())
                .map(|chunk| {
                    let chunk_coeffs = chunk
                        .into_iter()
                        .map(|c| c.into_bigint())
                        .collect::<Vec<_>>();
                    let chunk_res = G::Group::msm_bigint(&self.g, &chunk_coeffs);
                    chunk_res.into_affine()
                })
                .collect()
        };

        // Pad with zero commitments up to the requested number of chunks.
        for _ in chunks.len()..num_chunks {
            chunks.push(G::zero());
        }

        PolyComm::<G>::new(chunks)
    }

    /// Hiding variant of `commit_non_hiding`: commits and then masks with
    /// fresh random blinders.
    fn commit(
        &self,
        plnm: &DensePolynomial<G::ScalarField>,
        num_chunks: usize,
        rng: &mut (impl RngCore + CryptoRng),
    ) -> BlindedCommitment<G> {
        self.mask(self.commit_non_hiding(plnm, num_chunks), rng)
    }

    /// Like `commit`, but with caller-provided blinders; errors when the
    /// blinder count does not match the commitment chunk count.
    fn commit_custom(
        &self,
        plnm: &DensePolynomial<G::ScalarField>,
        num_chunks: usize,
        blinders: &PolyComm<G::ScalarField>,
    ) -> Result<BlindedCommitment<G>, CommitmentError> {
        self.mask_custom(self.commit_non_hiding(plnm, num_chunks), blinders)
    }

    /// Commits (without hiding) to a polynomial given in evaluation form,
    /// using the cached commitments to the Lagrange basis of `domain`.
    ///
    /// # Panics
    ///
    /// Panics when `domain` is larger than the evaluation domain of `plnm`.
    fn commit_evaluations_non_hiding(
        &self,
        domain: D<G::ScalarField>,
        plnm: &Evaluations<G::ScalarField, D<G::ScalarField>>,
    ) -> PolyComm<G> {
        let basis = self.get_lagrange_basis(domain);
        // Linear combination of the Lagrange-basis commitments weighted by
        // the evaluations — homomorphically this commits to the interpolant.
        let commit_evaluations = |evals: &Vec<G::ScalarField>, basis: &Vec<PolyComm<G>>| {
            let basis_refs: Vec<_> = basis.iter().collect();
            PolyComm::<G>::multi_scalar_mul(&basis_refs, evals)
        };
        match domain.size.cmp(&plnm.domain().size) {
            std::cmp::Ordering::Less => {
                // Sub-sample the evaluations with stride `s`.
                // NOTE(review): assumes `plnm.domain().size` is a multiple of
                // `domain.size` (both are radix-2 domain sizes) — the integer
                // division truncates otherwise.
                #[allow(clippy::cast_possible_truncation)]
                let s = (plnm.domain().size / domain.size) as usize;
                let v: Vec<_> = (0..(domain.size())).map(|i| plnm.evals[s * i]).collect();
                commit_evaluations(&v, basis)
            }
            std::cmp::Ordering::Equal => commit_evaluations(&plnm.evals, basis),
            std::cmp::Ordering::Greater => {
                panic!("desired commitment domain size ({}) greater than evaluations' domain size ({}):", domain.size, plnm.domain().size)
            }
        }
    }

    /// Hiding variant of `commit_evaluations_non_hiding`, with fresh random
    /// blinders.
    fn commit_evaluations(
        &self,
        domain: D<G::ScalarField>,
        plnm: &Evaluations<G::ScalarField, D<G::ScalarField>>,
        rng: &mut (impl RngCore + CryptoRng),
    ) -> BlindedCommitment<G> {
        self.mask(self.commit_evaluations_non_hiding(domain, plnm), rng)
    }

    /// Like `commit_evaluations`, but with caller-provided blinders.
    fn commit_evaluations_custom(
        &self,
        domain: D<G::ScalarField>,
        plnm: &Evaluations<G::ScalarField, D<G::ScalarField>>,
        blinders: &PolyComm<G::ScalarField>,
    ) -> Result<BlindedCommitment<G>, CommitmentError> {
        self.mask_custom(self.commit_evaluations_non_hiding(domain, plnm), blinders)
    }

    /// Creates an SRS of size `depth` transparently: point `i` is the
    /// hash-to-curve of the big-endian bytes of `i` (same derivation as
    /// `create_parallel`, but sequential).
    fn create(depth: usize) -> Self {
        let m = G::Map::setup();

        let g: Vec<_> = (0..depth)
            .map(|i| {
                let mut h = Blake2b512::new();
                #[allow(clippy::cast_possible_truncation)]
                h.update((i as u32).to_be_bytes());
                point_of_random_bytes(&m, &h.finalize())
            })
            .collect();

        // Compute a blinder
        let h = {
            let mut h = Blake2b512::new();
            h.update(b"srs_misc");
            // FIXME: This is for retrocompatibility with a previous version
            // that was using a list initialisation. It is not necessary.
            h.update(0_u32.to_be_bytes());
            point_of_random_bytes(&m, &h.finalize())
        };

        Self {
            g,
            h,
            lagrange_bases: HashMapCache::new(),
        }
    }

    /// Returns (computing and caching on first use) the commitments to the
    /// Lagrange basis for the radix-2 domain of size `domain_size`.
    /// Panics (via `unwrap`) when no radix-2 domain of that size exists.
    fn get_lagrange_basis_from_domain_size(&self, domain_size: usize) -> &Vec<PolyComm<G>> {
        self.lagrange_bases.get_or_generate(domain_size, || {
            self.lagrange_basis(D::new(domain_size).unwrap())
        })
    }

    /// Same as `get_lagrange_basis_from_domain_size`, keyed by the domain
    /// itself.
    fn get_lagrange_basis(&self, domain: D<G::ScalarField>) -> &Vec<PolyComm<G>> {
        self.lagrange_bases
            .get_or_generate(domain.size(), || self.lagrange_basis(domain))
    }

    /// The number of basis points in the SRS.
    fn size(&self) -> usize {
        self.g.len()
    }
}
667
668impl<G: CommitmentCurve> SRS<G> {
669 /// Creates an opening proof for a batch of polynomial commitments.
670 ///
671 /// This function implements the IPA (Inner Product Argument) prover. During the
672 /// `k = log_2(n)` rounds of folding, it implicitly constructs the **challenge
673 /// polynomial** `b(X)`.
674 ///
675 /// Note: The use of the challenge polynomial `b(X)` is a modification to the
676 /// original IPA protocol that improves efficiency in recursive proof settings. The challenge
677 /// polynomial is inspired from the "Amoritization strategy"" from [Recursive Proof
678 /// Composition without a Trusted Setup](https://eprint.iacr.org/2019/1021.pdf), section 3.2.
679 ///
680 /// # Panics
681 ///
682 /// Panics if IPA folding does not produce single elements after log rounds,
683 /// or if the challenge inverse cannot be computed.
684 #[allow(clippy::type_complexity)]
685 #[allow(clippy::many_single_char_names)]
686 #[allow(clippy::too_many_lines)]
687 pub fn open<EFqSponge, RNG, D: EvaluationDomain<G::ScalarField>, const FULL_ROUNDS: usize>(
688 &self,
689 group_map: &G::Map,
690 plnms: PolynomialsToCombine<G, D>,
691 elm: &[G::ScalarField],
692 polyscale: G::ScalarField,
693 evalscale: G::ScalarField,
694 mut sponge: EFqSponge,
695 rng: &mut RNG,
696 ) -> OpeningProof<G, FULL_ROUNDS>
697 where
698 EFqSponge: Clone + FqSponge<G::BaseField, G, G::ScalarField, FULL_ROUNDS>,
699 RNG: RngCore + CryptoRng,
700 G::BaseField: PrimeField,
701 G: EndoCurve,
702 {
703 let (endo_q, endo_r) = endos::<G>();
704
705 let rounds = math::ceil_log2(self.g.len());
706 let padded_length = 1 << rounds;
707
708 // TODO: Trim this to the degree of the largest polynomial
709 // TODO: We do always suppose we have a power of 2 for the SRS in
710 // practice. Therefore, padding equals zero, and this code can be
711 // removed. Only a current test case uses a SRS with a non-power of 2.
712 let padding = padded_length - self.g.len();
713 let mut g = self.g.clone();
714 g.extend(vec![G::zero(); padding]);
715
716 // Combines polynomials roughly as follows: p(X) := ∑_i polyscale^i p_i(X)
717 //
718 // `blinding_factor` is a combined set of commitments that are
719 // paired with polynomials in `plnms`. In kimchi, these input
720 // commitments are poly com blinders, so often `[G::ScalarField::one();
721 // num_chunks]` or zeroes.
722 let (p, blinding_factor) = combine_polys::<G, D>(plnms, polyscale, self.g.len());
723
724 // The initial evaluation vector for polynomial commitment b_init is not
725 // just the powers of a single point as in the original IPA
726 // (1,ζ,ζ^2,...)
727 //
728 // but rather a vector of linearly combined powers with `evalscale` as
729 // recombiner.
730 //
731 // b_init[j] = Σ_i evalscale^i elm_i^j
732 // = ζ^j + evalscale * ζ^j ω^j (in the specific case of challenges (ζ,ζω))
733 //
734 // So in our case b_init is the following vector:
735 // 1 + evalscale
736 // ζ + evalscale * ζ ω
737 // ζ^2 + evalscale * (ζ ω)^2
738 // ζ^3 + evalscale * (ζ ω)^3
739 // ...
740 let b_init = {
741 // randomise/scale the eval powers
742 let mut scale = G::ScalarField::one();
743 let mut res: Vec<G::ScalarField> =
744 (0..padded_length).map(|_| G::ScalarField::zero()).collect();
745 for e in elm {
746 for (i, t) in pows(padded_length, *e).iter().enumerate() {
747 res[i] += &(scale * t);
748 }
749 scale *= &evalscale;
750 }
751 res
752 };
753
754 // Combined polynomial p(X) evaluated at the combined eval point b_init.
755 let combined_inner_product = p
756 .coeffs
757 .iter()
758 .zip(b_init.iter())
759 .map(|(a, b)| *a * b)
760 .fold(G::ScalarField::zero(), |acc, x| acc + x);
761
762 // Usually, the prover sends `combined_inner_product`` to the verifier
763 // So we should absorb `combined_inner_product``
764 // However it is more efficient in the recursion circuit
765 // to absorb a slightly modified version of it.
766 // As a reminder, in a recursive setting, the challenges are given as a
767 // public input and verified in the next iteration.
768 // See the `shift_scalar`` doc.
769 sponge.absorb_fr(&[shift_scalar::<G>(combined_inner_product)]);
770
771 // Generate another randomisation base U; our commitments will be w.r.t
772 // bases {G_i},H,U.
773 let u_base: G = {
774 let t = sponge.challenge_fq();
775 let (x, y) = group_map.to_group(t);
776 G::of_coordinates(x, y)
777 };
778
779 let mut a = p.coeffs;
780 assert!(padded_length >= a.len());
781 a.extend(vec![G::ScalarField::zero(); padded_length - a.len()]);
782
783 let mut b = b_init;
784
785 let mut lr = vec![];
786
787 let mut blinders = vec![];
788
789 let mut chals = vec![];
790 let mut chal_invs = vec![];
791
792 // The main IPA folding loop that has log iterations.
793 for _ in 0..rounds {
794 let n = g.len() / 2;
795 // Pedersen bases
796 let (g_lo, g_hi) = (&g[0..n], &g[n..]);
797 // Polynomial coefficients
798 let (a_lo, a_hi) = (&a[0..n], &a[n..]);
799 // Evaluation points
800 let (b_lo, b_hi) = (&b[0..n], &b[n..]);
801
802 // Blinders for L/R
803 let rand_l = <G::ScalarField as UniformRand>::rand(rng);
804 let rand_r = <G::ScalarField as UniformRand>::rand(rng);
805
806 // Pedersen commitment to a_lo,rand_l,<a_hi,b_lo>
807 let l = G::Group::msm_bigint(
808 &[g_lo, &[self.h, u_base]].concat(),
809 &[a_hi, &[rand_l, inner_prod(a_hi, b_lo)]]
810 .concat()
811 .iter()
812 .map(|x| x.into_bigint())
813 .collect::<Vec<_>>(),
814 )
815 .into_affine();
816
817 let r = G::Group::msm_bigint(
818 &[g_hi, &[self.h, u_base]].concat(),
819 &[a_lo, &[rand_r, inner_prod(a_lo, b_hi)]]
820 .concat()
821 .iter()
822 .map(|x| x.into_bigint())
823 .collect::<Vec<_>>(),
824 )
825 .into_affine();
826
827 lr.push((l, r));
828 blinders.push((rand_l, rand_r));
829
830 sponge.absorb_g(&[l]);
831 sponge.absorb_g(&[r]);
832
833 // Round #i challenges;
834 // - not to be confused with "u_base"
835 // - not to be confused with "u" as "polyscale"
836 let u_pre = squeeze_prechallenge(&mut sponge);
837 let u = u_pre.to_field(&endo_r);
838 let u_inv = u.inverse().unwrap();
839
840 chals.push(u);
841 chal_invs.push(u_inv);
842
843 // IPA-folding polynomial coefficients
844 a = a_hi
845 .par_iter()
846 .zip(a_lo)
847 .map(|(&hi, &lo)| {
848 // lo + u_inv * hi
849 let mut res = hi;
850 res *= u_inv;
851 res += &lo;
852 res
853 })
854 .collect();
855
856 // IPA-folding evaluation points.
857 // This folding implicitly constructs the challenge polynomial b(X):
858 // after all rounds, b[0] = b_poly(chals, evaluation_point).
859 b = b_lo
860 .par_iter()
861 .zip(b_hi)
862 .map(|(&lo, &hi)| {
863 // lo + u * hi
864 let mut res = hi;
865 res *= u;
866 res += &lo;
867 res
868 })
869 .collect();
870
871 // IPA-folding bases
872 g = G::combine_one_endo(endo_r, endo_q, g_lo, g_hi, &u_pre);
873 }
874
875 assert!(
876 g.len() == 1 && a.len() == 1 && b.len() == 1,
877 "IPA commitment folding must produce single elements after log rounds"
878 );
879 let a0 = a[0];
880 // b0 is the folded evaluation point, equal to b_poly(chals, evaluation_point).
881 // Note: The folding of `b` (and this extraction) could be skipped in a
882 // non-recursive setting where the verifier doesn't need to recompute
883 // the evaluation from challenges using b_poly.
884 let b0 = b[0];
885 let g0 = g[0];
886
887 // Compute r_prime, a folded blinder. It combines blinders on
888 // each individual step of the IPA folding process together
889 // with the final blinding_factor of the polynomial.
890 //
891 // r_prime := ∑_i (rand_l[i] * u[i]^{-1} + rand_r * u[i])
892 // + blinding_factor
893 //
894 // where u is a vector of folding challenges, and rand_l/rand_r are
895 // intermediate L/R blinders.
896 let r_prime = blinders
897 .iter()
898 .zip(chals.iter().zip(chal_invs.iter()))
899 .map(|((rand_l, rand_r), (u, u_inv))| ((*rand_l) * u_inv) + (*rand_r * u))
900 .fold(blinding_factor, |acc, x| acc + x);
901
902 let d = <G::ScalarField as UniformRand>::rand(rng);
903 let r_delta = <G::ScalarField as UniformRand>::rand(rng);
904
905 // Compute delta, the commitment
906 // delta = [d] G0 + \
907 // [b0*d] U_base + \
908 // [r_delta] H^r (as a group element, in additive notation)
909 let delta = ((g0.into_group() + (u_base.mul(b0))).into_affine().mul(d)
910 + self.h.mul(r_delta))
911 .into_affine();
912
913 sponge.absorb_g(&[delta]);
914 let c = ScalarChallenge::new(sponge.challenge()).to_field(&endo_r);
915
916 // (?) Schnorr-like responses showing the knowledge of r_prime and a0.
917 let z1 = a0 * c + d;
918 let z2 = r_prime * c + r_delta;
919
920 OpeningProof {
921 delta,
922 lr,
923 z1,
924 z2,
925 sg: g0,
926 }
927 }
928
929 fn lagrange_basis(&self, domain: D<G::ScalarField>) -> Vec<PolyComm<G>> {
930 let n = domain.size();
931
932 // Let V be a vector space over the field F.
933 //
934 // Given
935 // - a domain [ 1, w, w^2, ..., w^{n - 1} ]
936 // - a vector v := [ v_0, ..., v_{n - 1} ] in V^n
937 //
938 // the FFT algorithm computes the matrix application
939 //
940 // u = M(w) * v
941 //
942 // where
943 // M(w) =
944 // 1 1 1 ... 1
945 // 1 w w^2 ... w^{n-1}
946 // ...
947 // 1 w^{n-1} (w^2)^{n-1} ... (w^{n-1})^{n-1}
948 //
949 // The IFFT algorithm computes
950 //
951 // v = M(w)^{-1} * u
952 //
953 // Let's see how we can use this algorithm to compute the lagrange basis
954 // commitments.
955 //
956 // Let V be the vector space F[x] of polynomials in x over F.
957 // Let v in V be the vector [ L_0, ..., L_{n - 1} ] where L_i is the i^{th}
958 // normalized Lagrange polynomial (where L_i(w^j) = j == i ? 1 : 0).
959 //
960 // Consider the rows of M(w) * v. Let me write out the matrix and vector
961 // so you can see more easily.
962 //
963 // | 1 1 1 ... 1 | | L_0 |
964 // | 1 w w^2 ... w^{n-1} | * | L_1 |
965 // | ... | | ... |
966 // | 1 w^{n-1} (w^2)^{n-1} ... (w^{n-1})^{n-1} | | L_{n-1} |
967 //
968 // The 0th row is L_0 + L1 + ... + L_{n - 1}. So, it's the polynomial
969 // that has the value 1 on every element of the domain.
970 // In other words, it's the polynomial 1.
971 //
972 // The 1st row is L_0 + w L_1 + ... + w^{n - 1} L_{n - 1}. So, it's the
973 // polynomial which has value w^i on w^i.
974 // In other words, it's the polynomial x.
975 //
976 // In general, you can see that row i is in fact the polynomial x^i.
977 //
978 // Thus, M(w) * v is the vector u, where u = [ 1, x, x^2, ..., x^n ]
979 //
980 // Therefore, the IFFT algorithm, when applied to the vector u (the
981 // standard monomial basis) will yield the vector v of the (normalized)
982 // Lagrange polynomials.
983 //
984 // Now, because the polynomial commitment scheme is additively
985 // homomorphic, and because the commitment to the polynomial x^i is just
986 // self.g[i], we can obtain commitments to the normalized Lagrange
987 // polynomials by applying IFFT to the vector self.g[0..n].
988 //
989 //
990 // Further still, we can do the same trick for 'chunked' polynomials.
991 //
992 // Recall that a chunked polynomial is some f of degree k*n - 1 with
993 // f(x) = f_0(x) + x^n f_1(x) + ... + x^{(k-1) n} f_{k-1}(x)
994 // where each f_i has degree n-1.
995 //
996 // In the above, if we set u = [ 1, x^2, ... x^{n-1}, 0, 0, .., 0 ]
997 // then we effectively 'zero out' any polynomial terms higher than
998 // x^{n-1}, leaving us with the 'partial Lagrange polynomials' that
999 // contribute to f_0.
1000 //
        // Similarly, u = [ 0, 0, ..., 0, 1, x, x^2, ..., x^{n-1}, 0, 0, ..., 0]
1002 // with n leading zeros 'zeroes out' all terms except the 'partial
1003 // Lagrange polynomials' that contribute to f_1, and likewise for each
1004 // f_i.
1005 //
1006 // By computing each of these, and recollecting the terms as a vector of
1007 // polynomial commitments, we obtain a chunked commitment to the L_i
1008 // polynomials.
1009 let srs_size = self.g.len();
1010 let num_elems = n.div_ceil(srs_size);
1011 let mut chunks = Vec::with_capacity(num_elems);
1012
1013 // For each chunk
1014 for i in 0..num_elems {
1015 // Initialize the vector with zero curve points
1016 let mut lg: Vec<<G as AffineRepr>::Group> = vec![<G as AffineRepr>::Group::zero(); n];
1017 // Overwrite the terms corresponding to that chunk with the SRS
1018 // curve points
1019 let start_offset = i * srs_size;
1020 let num_terms = min((i + 1) * srs_size, n) - start_offset;
1021 for j in 0..num_terms {
1022 lg[start_offset + j] = self.g[j].into_group();
1023 }
1024 // Apply the IFFT
1025 domain.ifft_in_place(&mut lg);
            // Append the 'partial Lagrange polynomial' commitments to the
            // vector of chunks
1028 chunks.push(<G as AffineRepr>::Group::normalize_batch(lg.as_mut_slice()));
1029 }
1030
1031 (0..n)
1032 .map(|i| PolyComm {
1033 chunks: chunks.iter().map(|v| v[i]).collect(),
1034 })
1035 .collect()
1036 }
1037}
1038
/// An opening proof for the IPA commitment scheme: the per-round cross
/// commitments of the folding argument plus the final opening data.
#[serde_as]
#[derive(Clone, Debug, Serialize, Deserialize, Default, PartialEq, Eq)]
#[serde(bound = "G: ark_serialize::CanonicalDeserialize + ark_serialize::CanonicalSerialize")]
pub struct OpeningProof<G: AffineRepr, const FULL_ROUNDS: usize> {
    /// Vector of rounds of L & R commitments
    #[serde_as(as = "Vec<(o1_utils::serialization::SerdeAs, o1_utils::serialization::SerdeAs)>")]
    pub lr: Vec<(G, G)>,
    /// Group element used in the final opening equation.
    /// NOTE(review): presumably the Schnorr-style commitment of the last
    /// round — confirm against the `open`/`verify` implementations.
    #[serde_as(as = "o1_utils::serialization::SerdeAs")]
    pub delta: G,
    /// First scalar of the final opening response (see `open`).
    #[serde_as(as = "o1_utils::serialization::SerdeAs")]
    pub z1: G::ScalarField,
    /// Second scalar of the final opening response (see `open`).
    #[serde_as(as = "o1_utils::serialization::SerdeAs")]
    pub z2: G::ScalarField,
    /// A final folded commitment base
    #[serde_as(as = "o1_utils::serialization::SerdeAs")]
    pub sg: G,
}
1056
// The `OpenProof` trait implementation for the IPA opening proof simply
// delegates to the corresponding inherent methods on the `SRS`, so that
// generic code can open/verify without knowing the concrete scheme.
impl<
        BaseField: PrimeField,
        G: AffineRepr<BaseField = BaseField> + CommitmentCurve + EndoCurve,
        const FULL_ROUNDS: usize,
    > crate::OpenProof<G, FULL_ROUNDS> for OpeningProof<G, FULL_ROUNDS>
{
    type SRS = SRS<G>;

    /// Produces an IPA opening proof; forwards all arguments unchanged to
    /// [`SRS::open`].
    fn open<EFqSponge, RNG, D: EvaluationDomain<<G as AffineRepr>::ScalarField>>(
        srs: &Self::SRS,
        group_map: &<G as CommitmentCurve>::Map,
        plnms: PolynomialsToCombine<G, D>,
        elm: &[<G as AffineRepr>::ScalarField],
        polyscale: <G as AffineRepr>::ScalarField,
        evalscale: <G as AffineRepr>::ScalarField,
        sponge: EFqSponge,
        rng: &mut RNG,
    ) -> Self
    where
        EFqSponge: Clone
            + FqSponge<<G as AffineRepr>::BaseField, G, <G as AffineRepr>::ScalarField, FULL_ROUNDS>,
        RNG: RngCore + CryptoRng,
    {
        srs.open(group_map, plnms, elm, polyscale, evalscale, sponge, rng)
    }

    /// Batch-verifies IPA opening proofs; forwards all arguments unchanged
    /// to [`SRS::verify`].
    fn verify<EFqSponge, RNG>(
        srs: &Self::SRS,
        group_map: &G::Map,
        batch: &mut [BatchEvaluationProof<G, EFqSponge, Self, FULL_ROUNDS>],
        rng: &mut RNG,
    ) -> bool
    where
        EFqSponge:
            FqSponge<<G as AffineRepr>::BaseField, G, <G as AffineRepr>::ScalarField, FULL_ROUNDS>,
        RNG: RngCore + CryptoRng,
    {
        srs.verify(group_map, batch, rng)
    }
}
1097
/// Commitment round challenges (endo mapped) and their inverses.
pub struct Challenges<F> {
    /// One endo-mapped scalar challenge per IPA folding round.
    pub chal: Vec<F>,
    /// Multiplicative inverses of `chal`, in the same order.
    pub chal_inv: Vec<F>,
}
1103
1104impl<G: AffineRepr, const FULL_ROUNDS: usize> OpeningProof<G, FULL_ROUNDS> {
1105 /// Computes a log-sized vector of scalar challenges for
1106 /// recombining elements inside the IPA.
1107 pub fn prechallenges<EFqSponge: FqSponge<G::BaseField, G, G::ScalarField, FULL_ROUNDS>>(
1108 &self,
1109 sponge: &mut EFqSponge,
1110 ) -> Vec<ScalarChallenge<G::ScalarField>> {
1111 let _t = sponge.challenge_fq();
1112 self.lr
1113 .iter()
1114 .map(|(l, r)| {
1115 sponge.absorb_g(&[*l]);
1116 sponge.absorb_g(&[*r]);
1117 squeeze_prechallenge(sponge)
1118 })
1119 .collect()
1120 }
1121
1122 /// Same as `prechallenges`, but maps scalar challenges using the provided
1123 /// endomorphism, and computes their inverses.
1124 pub fn challenges<EFqSponge: FqSponge<G::BaseField, G, G::ScalarField, FULL_ROUNDS>>(
1125 &self,
1126 endo_r: &G::ScalarField,
1127 sponge: &mut EFqSponge,
1128 ) -> Challenges<G::ScalarField> {
1129 let chal: Vec<_> = self
1130 .lr
1131 .iter()
1132 .map(|(l, r)| {
1133 sponge.absorb_g(&[*l]);
1134 sponge.absorb_g(&[*r]);
1135 squeeze_challenge(endo_r, sponge)
1136 })
1137 .collect();
1138
1139 let chal_inv = {
1140 let mut cs = chal.clone();
1141 ark_ff::batch_inversion(&mut cs);
1142 cs
1143 };
1144
1145 Challenges { chal, chal_inv }
1146 }
1147}
1148
#[cfg(feature = "ocaml_types")]
#[allow(non_local_definitions)]
pub mod caml {
    use super::OpeningProof;
    use ark_ec::AffineRepr;
    use ocaml;

    /// OCaml-facing mirror of [`OpeningProof`], with curve points and scalars
    /// replaced by their Caml representations.
    #[derive(ocaml::IntoValue, ocaml::FromValue, ocaml_gen::Struct)]
    pub struct CamlOpeningProof<G, F> {
        /// Vector of rounds of L & R commitments
        pub lr: Vec<(G, G)>,
        pub delta: G,
        pub z1: F,
        pub z2: F,
        pub sg: G,
    }

    impl<G, CamlF, CamlG, const FULL_ROUNDS: usize> From<OpeningProof<G, FULL_ROUNDS>>
        for CamlOpeningProof<CamlG, CamlF>
    where
        G: AffineRepr,
        CamlG: From<G>,
        CamlF: From<G::ScalarField>,
    {
        fn from(proof: OpeningProof<G, FULL_ROUNDS>) -> Self {
            // Convert each (L, R) pair of the folding rounds pointwise.
            let lr = proof
                .lr
                .into_iter()
                .map(|(l, r)| (CamlG::from(l), CamlG::from(r)))
                .collect();
            CamlOpeningProof {
                lr,
                delta: CamlG::from(proof.delta),
                z1: CamlF::from(proof.z1),
                z2: CamlF::from(proof.z2),
                sg: CamlG::from(proof.sg),
            }
        }
    }

    impl<G, CamlF, CamlG, const FULL_ROUNDS: usize> From<CamlOpeningProof<CamlG, CamlF>>
        for OpeningProof<G, FULL_ROUNDS>
    where
        G: AffineRepr,
        CamlG: Into<G>,
        CamlF: Into<G::ScalarField>,
    {
        fn from(caml: CamlOpeningProof<CamlG, CamlF>) -> Self {
            // Inverse direction: map every Caml value back into the arkworks
            // representation.
            let lr = caml
                .lr
                .into_iter()
                .map(|(l, r)| (l.into(), r.into()))
                .collect();
            OpeningProof {
                lr,
                delta: caml.delta.into(),
                z1: caml.z1.into(),
                z2: caml.z2.into(),
                sg: caml.sg.into(),
            }
        }
    }
}
1209}