poly_commitment/ipa.rs
//! IPA polynomial commitment scheme.
//!
//! This module contains the implementation of the polynomial commitment scheme
//! called the Inner Product Argument (IPA) as described in [Efficient
//! Zero-Knowledge Arguments for Arithmetic Circuits in the Discrete Log
//! Setting](https://eprint.iacr.org/2016/263).

use crate::{
    commitment::{
        b_poly, b_poly_coefficients, combine_commitments, shift_scalar, squeeze_challenge,
        squeeze_prechallenge, BatchEvaluationProof, CommitmentCurve, EndoCurve,
    },
    error::CommitmentError,
    hash_map_cache::HashMapCache,
    utils::combine_polys,
    BlindedCommitment, PolyComm, PolynomialsToCombine, SRS as SRSTrait,
};
use ark_ec::{AffineRepr, CurveGroup, VariableBaseMSM};
use ark_ff::{BigInteger, Field, One, PrimeField, UniformRand, Zero};
use ark_poly::{
    univariate::DensePolynomial, EvaluationDomain, Evaluations, Radix2EvaluationDomain as D,
};
use ark_serialize::{CanonicalDeserialize, CanonicalSerialize};
use blake2::{Blake2b512, Digest};
use groupmap::GroupMap;
use mina_poseidon::{sponge::ScalarChallenge, FqSponge};
use o1_utils::{
    field_helpers::{inner_prod, pows},
    math,
};
use rand::{CryptoRng, RngCore};
use rayon::prelude::*;
use serde::{Deserialize, Serialize};
use serde_with::serde_as;
use std::{cmp::min, iter::Iterator, ops::AddAssign};

#[serde_as]
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
#[serde(bound = "G: CanonicalDeserialize + CanonicalSerialize")]
#[allow(clippy::unsafe_derive_deserialize)]
pub struct SRS<G> {
    /// The vector of group elements for committing to polynomials in
    /// coefficient form.
    #[serde_as(as = "Vec<o1_utils::serialization::SerdeAs>")]
    pub g: Vec<G>,

    /// A group element used for blinding commitments
    #[serde_as(as = "o1_utils::serialization::SerdeAs")]
    pub h: G,

    // TODO: the following field should be separated out, as it is an
    // optimization value
    /// Commitments to Lagrange bases, per domain size
    #[serde(skip)]
    pub lagrange_bases: HashMapCache<usize, Vec<PolyComm<G>>>,
}

impl<G> PartialEq for SRS<G>
where
    G: PartialEq,
{
    fn eq(&self, other: &Self) -> bool {
        self.g == other.g && self.h == other.h
    }
}

/// Computes the endomorphism coefficients (ξ, λ) for a curve.
///
/// For curves of the form y² = x³ + b (like Pallas and Vesta), there exists
/// an efficient endomorphism φ defined by:
///
/// φ(x, y) = (ξ · x, y)
///
/// where ξ is a primitive cube root of unity in the base field (ξ³ = 1, ξ ≠ 1).
/// This works because (ξx)³ = ξ³x³ = x³, so the point remains on the curve.
///
/// This endomorphism corresponds to scalar multiplication by λ:
///
/// φ(P) = \[λ\]P
///
/// where λ is a primitive cube root of unity in the scalar field.
///
/// # Returns
///
/// A tuple (`endo_q`, `endo_r`) where:
/// - `endo_q` (ξ): cube root of unity in the base field `F_q`, used to compute φ(P)
/// - `endo_r` (λ): the corresponding scalar in `F_r` such that φ(P) = \[λ\]P
///
/// # Mathematical Background
///
/// The cube root is computed as ξ = g^((p-1)/3) where g is a generator of `F_p*`.
/// By Fermat's Little Theorem, ξ³ = g^(p-1) = 1.
///
/// Since there are two primitive cube roots of unity (ξ and ξ²), the function
/// verifies which one corresponds to the endomorphism by checking:
///
/// \[`potential_λ`\]G == φ(G)
///
/// If not, it uses λ = `potential_λ²` instead.
///
/// # Panics
///
/// Panics if the generator point coordinates cannot be extracted.
///
/// # References
///
/// - Halo paper, Section 6.2: <https://eprint.iacr.org/2019/1021>
/// - GLV method for fast scalar multiplication
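///
/// # Example
///
/// A minimal usage sketch (the same identity the function itself checks), for
/// any `G: CommitmentCurve` whose base field is prime:
///
/// ```ignore
/// let (endo_q, endo_r) = endos::<G>();
/// let p = G::generator();
/// let (x, y) = p.to_coordinates().unwrap();
/// // φ(P) = (ξ·x, y) must agree with scalar multiplication by λ.
/// assert_eq!(G::of_coordinates(x * endo_q, y).into_group(), p.mul(endo_r));
/// ```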
#[must_use]
pub fn endos<G: CommitmentCurve>() -> (G::BaseField, G::ScalarField)
where
    G::BaseField: PrimeField,
{
    let endo_q: G::BaseField = mina_poseidon::sponge::endo_coefficient();
    let endo_r = {
        let potential_endo_r: G::ScalarField = mina_poseidon::sponge::endo_coefficient();
        let t = G::generator();
        let (x, y) = t.to_coordinates().unwrap();
        let phi_t = G::of_coordinates(x * endo_q, y);
        if t.mul(potential_endo_r) == phi_t.into_group() {
            potential_endo_r
        } else {
            potential_endo_r * potential_endo_r
        }
    };
    (endo_q, endo_r)
}

fn point_of_random_bytes<G: CommitmentCurve>(map: &G::Map, random_bytes: &[u8]) -> G
where
    G::BaseField: Field,
{
    // packing in bit-representation
    const N: usize = 31;
    #[allow(clippy::cast_possible_truncation)]
    let extension_degree = G::BaseField::extension_degree() as usize;

    let mut base_fields = Vec::with_capacity(N * extension_degree);

    for base_count in 0..extension_degree {
        let mut bits = [false; 8 * N];
        let offset = base_count * N;
        for i in 0..N {
            for j in 0..8 {
                bits[8 * i + j] = (random_bytes[offset + i] >> j) & 1 == 1;
            }
        }

        let n =
            <<G::BaseField as Field>::BasePrimeField as PrimeField>::BigInt::from_bits_be(&bits);
        let t = <<G::BaseField as Field>::BasePrimeField as PrimeField>::from_bigint(n)
            .expect("packing code has a bug");
        base_fields.push(t);
    }

    let t = G::BaseField::from_base_prime_field_elems(base_fields).unwrap();

    let (x, y) = map.to_group(t);
    G::of_coordinates(x, y).mul_by_cofactor()
}

/// Additional methods for the SRS structure
impl<G: CommitmentCurve> SRS<G> {
    /// Verifies a batch of polynomial commitment opening proofs.
    ///
    /// This IPA verification method is primarily designed for integration into
    /// recursive proof systems like Pickles. In recursive proofs, IPA verification
    /// is deferred by storing accumulators (`RecursionChallenge`)
    /// rather than verifying immediately in-circuit. This method performs the final
    /// out-of-circuit verification of those accumulators.
    ///
    /// The function reconstructs the **challenge polynomial** `b(X)` from the IPA
    /// challenges and uses it to verify the opening proofs. The challenge polynomial
    /// is defined as:
    /// ```text
    /// b(X) = prod_{i=0}^{k-1} (1 + u_{k-i} * X^{2^i})
    /// ```
    /// where `u_1, ..., u_k` are the challenges from the `k` rounds of the IPA protocol.
    /// See Section 3.2 of the [Halo paper](https://eprint.iacr.org/2019/1021.pdf).
    ///
    /// The verification reconstructs `b(X)` in two forms:
    /// - **Evaluation form**: `b_poly(&chal, x)` computes `b(x)` in `O(k)` operations
    /// - **Coefficient form**: `b_poly_coefficients(&chal)` returns the `2^k` coefficients
    ///   for the MSM `<s, G>` that verifies the accumulated commitment
    ///
    /// Note: The challenge polynomial reconstruction is specifically needed for recursive
    /// proof verification. For standalone (non-recursive) IPA verification, a simpler
    /// approach without `b(X)` reconstruction could be used.
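    ///
    /// For example (a toy case, purely for illustration), with `k = 2`
    /// challenges `u_1, u_2`:
    /// ```text
    /// b(X) = (1 + u_2 * X) * (1 + u_1 * X^2)
    ///      = 1 + u_2 * X + u_1 * X^2 + u_1 * u_2 * X^3
    /// ```
    /// so the coefficient form is `[1, u_2, u_1, u_1 * u_2]`.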
    ///
    /// # Panics
    ///
    /// Panics if the number of scalars does not match the number of points.
    ///
    /// Returns `true` if verification succeeds, `false` otherwise.
    pub fn verify<EFqSponge, RNG, const FULL_ROUNDS: usize>(
        &self,
        group_map: &G::Map,
        batch: &mut [BatchEvaluationProof<
            G,
            EFqSponge,
            OpeningProof<G, FULL_ROUNDS>,
            FULL_ROUNDS,
        >],
        rng: &mut RNG,
    ) -> bool
    where
        EFqSponge: FqSponge<G::BaseField, G, G::ScalarField, FULL_ROUNDS>,
        RNG: RngCore + CryptoRng,
        G::BaseField: PrimeField,
    {
        // Verifier checks for all i,
        // c_i Q_i + delta_i = z1_i (G_i + b_i U_i) + z2_i H
        //
        // if we sample evalscale at random, it suffices to check
        //
        // 0 == sum_i evalscale^i (c_i Q_i + delta_i - ( z1_i (G_i + b_i U_i) + z2_i H ))
        //
        // and because each G_i is a multiexp on the same array self.g, we
        // can batch the multiexp across proofs.
        //
        // So for each proof in the batch, we add onto our big multiexp the
        // following terms
        // evalscale^i c_i Q_i
        // evalscale^i delta_i
        // - (evalscale^i z1_i) G_i
        // - (evalscale^i z2_i) H
        // - (evalscale^i z1_i b_i) U_i

        // We also check that the sg component of the proof is equal to the
        // polynomial commitment to the "s" array

        let nonzero_length = self.g.len();

        let max_rounds = math::ceil_log2(nonzero_length);

        let padded_length = 1 << max_rounds;

        let (_, endo_r) = endos::<G>();

        // TODO: This will need adjusting
        let padding = padded_length - nonzero_length;
        let mut points = vec![self.h];
        points.extend(self.g.clone());
        points.extend(vec![G::zero(); padding]);

        let mut scalars = vec![G::ScalarField::zero(); padded_length + 1];
        assert_eq!(scalars.len(), points.len());

        // sample randomiser to scale the proofs with
        let rand_base = G::ScalarField::rand(rng);
        let sg_rand_base = G::ScalarField::rand(rng);

        let mut rand_base_i = G::ScalarField::one();
        let mut sg_rand_base_i = G::ScalarField::one();

        for BatchEvaluationProof {
            sponge,
            evaluation_points,
            polyscale,
            evalscale,
            evaluations,
            opening,
            combined_inner_product,
        } in batch.iter_mut()
        {
            sponge.absorb_fr(&[shift_scalar::<G>(*combined_inner_product)]);

            let u_base: G = {
                let t = sponge.challenge_fq();
                let (x, y) = group_map.to_group(t);
                G::of_coordinates(x, y)
            };

            let Challenges { chal, chal_inv } = opening.challenges::<EFqSponge>(&endo_r, sponge);

            sponge.absorb_g(&[opening.delta]);
            let c = ScalarChallenge::new(sponge.challenge()).to_field(&endo_r);

            // Evaluate the challenge polynomial b(X) at each evaluation point and combine.
            // This computes: b0 = sum_i evalscale^i * b(evaluation_point[i])
            // where b(X) = prod_{j=0}^{k-1} (1 + u_{k-j} * X^{2^j}) is the challenge polynomial
            // reconstructed from the IPA challenges `chal`.
            let b0 = {
                let mut scale = G::ScalarField::one();
                let mut res = G::ScalarField::zero();
                for &e in evaluation_points.iter() {
                    let term = b_poly(&chal, e);
                    res += &(scale * term);
                    scale *= *evalscale;
                }
                res
            };

            // Compute the 2^k coefficients of the challenge polynomial b(X).
            // These are used in the MSM <s, G> to verify the accumulated commitment.
            let s = b_poly_coefficients(&chal);

            let neg_rand_base_i = -rand_base_i;

            // TERM
            // - rand_base_i z1 G
            //
            // we also add -sg_rand_base_i * G to check correctness of sg.
            points.push(opening.sg);
            scalars.push(neg_rand_base_i * opening.z1 - sg_rand_base_i);

            // Here we add
            // sg_rand_base_i * ( < s, self.g > )
            // =
            // < sg_rand_base_i s, self.g >
            //
            // to check correctness of the sg component.
            {
                let terms: Vec<_> = s.par_iter().map(|s| sg_rand_base_i * s).collect();

                for (i, term) in terms.iter().enumerate() {
                    scalars[i + 1] += term;
                }
            }

            // TERM
            // - rand_base_i * z2 * H
            scalars[0] -= &(rand_base_i * opening.z2);

            // TERM
            // -rand_base_i * (z1 * b0 * U)
            scalars.push(neg_rand_base_i * (opening.z1 * b0));
            points.push(u_base);

            // TERM
            // rand_base_i c_i Q_i
            // = rand_base_i c_i
            // (sum_j (chal_invs[j] L_j + chals[j] R_j) + P_prime)
            // where P_prime = combined commitment + combined_inner_product * U
            let rand_base_i_c_i = c * rand_base_i;
            for ((l, r), (u_inv, u)) in opening.lr.iter().zip(chal_inv.iter().zip(chal.iter())) {
                points.push(*l);
                scalars.push(rand_base_i_c_i * u_inv);

                points.push(*r);
                scalars.push(rand_base_i_c_i * u);
            }

            // TERM
            // sum_j evalscale^j (sum_i polyscale^i f_i) (elm_j)
            // == sum_j sum_i evalscale^j polyscale^i f_i(elm_j)
            // == sum_i polyscale^i sum_j evalscale^j f_i(elm_j)
            combine_commitments(
                evaluations,
                &mut scalars,
                &mut points,
                *polyscale,
                rand_base_i_c_i,
            );

            scalars.push(rand_base_i_c_i * *combined_inner_product);
            points.push(u_base);

            scalars.push(rand_base_i);
            points.push(opening.delta);

            rand_base_i *= &rand_base;
            sg_rand_base_i *= &sg_rand_base;
        }

        // Verify the equation in two chunks, which is optimal for our SRS size.
        // (see the comment to the `benchmark_msm_parallel_vesta` MSM benchmark)
        let chunk_size = points.len() / 2;
        let msm_res = points
            .into_par_iter()
            .chunks(chunk_size)
            .zip(scalars.into_par_iter().chunks(chunk_size))
            .map(|(bases, coeffs)| {
                let coeffs_bigint = coeffs
                    .into_iter()
                    .map(ark_ff::PrimeField::into_bigint)
                    .collect::<Vec<_>>();
                G::Group::msm_bigint(&bases, &coeffs_bigint)
            })
            .reduce(G::Group::zero, |mut l, r| {
                l += r;
                l
            });

        msm_res == G::Group::zero()
    }

    /// This function creates a trusted-setup SRS instance for circuits with
    /// number of rows up to `depth`.
    ///
    /// # Safety
    ///
    /// This function is unsafe because it creates a trusted setup and the toxic
    /// waste is passed as a parameter.
    #[allow(unsafe_code)]
    pub unsafe fn create_trusted_setup(x: G::ScalarField, depth: usize) -> Self {
        let m = G::Map::setup();

        let mut x_pow = G::ScalarField::one();
        let g: Vec<_> = (0..depth)
            .map(|_| {
                let res = G::generator().mul(x_pow);
                x_pow *= x;
                res.into_affine()
            })
            .collect();

        // Compute a blinder
        let h = {
            let mut h = Blake2b512::new();
            h.update(b"srs_misc");
            // FIXME: This is for retrocompatibility with a previous version
            // that was using a list initialisation. It is not necessary.
            h.update(0_u32.to_be_bytes());
            point_of_random_bytes(&m, &h.finalize())
        };

        Self {
            g,
            h,
            lagrange_bases: HashMapCache::new(),
        }
    }
}

impl<G: CommitmentCurve> SRS<G>
where
    <G as CommitmentCurve>::Map: Sync,
    G::BaseField: PrimeField,
{
    /// This function creates an SRS instance for circuits with number of rows
    /// up to `depth`.
    ///
    /// # Panics
    ///
    /// Panics if `depth` exceeds `u32::MAX`.
    #[must_use]
    pub fn create_parallel(depth: usize) -> Self {
        let m = G::Map::setup();

        let g: Vec<_> = (0..depth)
            .into_par_iter()
            .map(|i| {
                let mut h = Blake2b512::new();
                #[allow(clippy::cast_possible_truncation)]
                h.update((i as u32).to_be_bytes());
                point_of_random_bytes(&m, &h.finalize())
            })
            .collect();

        // Compute a blinder
        let h = {
            let mut h = Blake2b512::new();
            h.update(b"srs_misc");
            // FIXME: This is for retrocompatibility with a previous version
            // that was using a list initialisation. It is not necessary.
            h.update(0_u32.to_be_bytes());
            point_of_random_bytes(&m, &h.finalize())
        };

        Self {
            g,
            h,
            lagrange_bases: HashMapCache::new(),
        }
    }
}

impl<G> SRSTrait<G> for SRS<G>
where
    G: CommitmentCurve,
{
    /// The maximum polynomial degree that can be committed to
    fn max_poly_size(&self) -> usize {
        self.g.len()
    }

    fn blinding_commitment(&self) -> G {
        self.h
    }

    /// Turns a non-hiding polynomial commitment into a hiding polynomial
    /// commitment. Transforms each given `<a, G>` into `(<a, G> + wH, w)` with
    /// a random `w` per commitment.
    fn mask(
        &self,
        comm: PolyComm<G>,
        rng: &mut (impl RngCore + CryptoRng),
    ) -> BlindedCommitment<G> {
        let blinders = comm.map(|_| G::ScalarField::rand(rng));
        self.mask_custom(comm, &blinders).unwrap()
    }

    fn mask_custom(
        &self,
        com: PolyComm<G>,
        blinders: &PolyComm<G::ScalarField>,
    ) -> Result<BlindedCommitment<G>, CommitmentError> {
        let commitment = com
            .zip(blinders)
            .ok_or_else(|| CommitmentError::BlindersDontMatch(blinders.len(), com.len()))?
            .map(|(g, b)| {
                let mut g_masked = self.h.mul(b);
                g_masked.add_assign(&g);
                g_masked.into_affine()
            });
        Ok(BlindedCommitment {
            commitment,
            blinders: blinders.clone(),
        })
    }

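    /// Commits to `plnm` without hiding, splitting it into chunks of size
    /// `self.g.len()` when the polynomial has more coefficients than the SRS.
    /// As an illustrative sketch (numbers chosen only for the example): with an
    /// SRS of size 4 and a polynomial of degree 7 (8 coefficients), the result
    /// has two chunks,
    /// ```text
    /// chunk_0 = <coeffs[0..4], g>    chunk_1 = <coeffs[4..8], g>
    /// ```
    /// and if `num_chunks` is larger than the number of produced chunks, the
    /// result is padded with `G::zero()` entries.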
    fn commit_non_hiding(
        &self,
        plnm: &DensePolynomial<G::ScalarField>,
        num_chunks: usize,
    ) -> PolyComm<G> {
        let is_zero = plnm.is_zero();

        // chunk while committing
        let mut chunks: Vec<_> = if is_zero {
            vec![G::zero()]
        } else if plnm.len() < self.g.len() {
            vec![G::Group::msm(&self.g[..plnm.len()], &plnm.coeffs)
                .unwrap()
                .into_affine()]
        } else if plnm.len() == self.g.len() {
            // when processing a single chunk, it's faster to parallelise
            // vertically in 2 threads (see the comment to the
            // `benchmark_msm_parallel_vesta` MSM benchmark)
            let n = self.g.len();
            let (r1, r2) = rayon::join(
                || G::Group::msm(&self.g[..n / 2], &plnm.coeffs[..n / 2]).unwrap(),
                || G::Group::msm(&self.g[n / 2..n], &plnm.coeffs[n / 2..n]).unwrap(),
            );

            vec![(r1 + r2).into_affine()]
        } else {
            // otherwise it's better to parallelise horizontally along chunks
            plnm.into_par_iter()
                .chunks(self.g.len())
                .map(|chunk| {
                    let chunk_coeffs = chunk
                        .into_iter()
                        .map(|c| c.into_bigint())
                        .collect::<Vec<_>>();
                    let chunk_res = G::Group::msm_bigint(&self.g, &chunk_coeffs);
                    chunk_res.into_affine()
                })
                .collect()
        };

        for _ in chunks.len()..num_chunks {
            chunks.push(G::zero());
        }

        PolyComm::<G>::new(chunks)
    }

    fn commit(
        &self,
        plnm: &DensePolynomial<G::ScalarField>,
        num_chunks: usize,
        rng: &mut (impl RngCore + CryptoRng),
    ) -> BlindedCommitment<G> {
        self.mask(self.commit_non_hiding(plnm, num_chunks), rng)
    }

    fn commit_custom(
        &self,
        plnm: &DensePolynomial<G::ScalarField>,
        num_chunks: usize,
        blinders: &PolyComm<G::ScalarField>,
    ) -> Result<BlindedCommitment<G>, CommitmentError> {
        self.mask_custom(self.commit_non_hiding(plnm, num_chunks), blinders)
    }

    fn commit_evaluations_non_hiding(
        &self,
        domain: D<G::ScalarField>,
        plnm: &Evaluations<G::ScalarField, D<G::ScalarField>>,
    ) -> PolyComm<G> {
        let basis = self.get_lagrange_basis(domain);
        let commit_evaluations = |evals: &Vec<G::ScalarField>, basis: &Vec<PolyComm<G>>| {
            let basis_refs: Vec<_> = basis.iter().collect();
            PolyComm::<G>::multi_scalar_mul(&basis_refs, evals)
        };
        match domain.size.cmp(&plnm.domain().size) {
            std::cmp::Ordering::Less => {
                #[allow(clippy::cast_possible_truncation)]
                let s = (plnm.domain().size / domain.size) as usize;
                let v: Vec<_> = (0..(domain.size())).map(|i| plnm.evals[s * i]).collect();
                commit_evaluations(&v, basis)
            }
            std::cmp::Ordering::Equal => commit_evaluations(&plnm.evals, basis),
            std::cmp::Ordering::Greater => {
                panic!("desired commitment domain size ({}) greater than evaluations' domain size ({})", domain.size, plnm.domain().size)
            }
        }
    }

    fn commit_evaluations(
        &self,
        domain: D<G::ScalarField>,
        plnm: &Evaluations<G::ScalarField, D<G::ScalarField>>,
        rng: &mut (impl RngCore + CryptoRng),
    ) -> BlindedCommitment<G> {
        self.mask(self.commit_evaluations_non_hiding(domain, plnm), rng)
    }

    fn commit_evaluations_custom(
        &self,
        domain: D<G::ScalarField>,
        plnm: &Evaluations<G::ScalarField, D<G::ScalarField>>,
        blinders: &PolyComm<G::ScalarField>,
    ) -> Result<BlindedCommitment<G>, CommitmentError> {
        self.mask_custom(self.commit_evaluations_non_hiding(domain, plnm), blinders)
    }

    fn create(depth: usize) -> Self {
        let m = G::Map::setup();

        let g: Vec<_> = (0..depth)
            .map(|i| {
                let mut h = Blake2b512::new();
                #[allow(clippy::cast_possible_truncation)]
                h.update((i as u32).to_be_bytes());
                point_of_random_bytes(&m, &h.finalize())
            })
            .collect();

        // Compute a blinder
        let h = {
            let mut h = Blake2b512::new();
            h.update(b"srs_misc");
            // FIXME: This is for retrocompatibility with a previous version
            // that was using a list initialisation. It is not necessary.
            h.update(0_u32.to_be_bytes());
            point_of_random_bytes(&m, &h.finalize())
        };

        Self {
            g,
            h,
            lagrange_bases: HashMapCache::new(),
        }
    }

    fn get_lagrange_basis_from_domain_size(&self, domain_size: usize) -> &Vec<PolyComm<G>> {
        self.lagrange_bases.get_or_generate(domain_size, || {
            self.lagrange_basis(D::new(domain_size).unwrap())
        })
    }

    fn get_lagrange_basis(&self, domain: D<G::ScalarField>) -> &Vec<PolyComm<G>> {
        self.lagrange_bases
            .get_or_generate(domain.size(), || self.lagrange_basis(domain))
    }

    fn size(&self) -> usize {
        self.g.len()
    }
}

impl<G: CommitmentCurve> SRS<G> {
    /// Creates an opening proof for a batch of polynomial commitments.
    ///
    /// This function implements the IPA (Inner Product Argument) prover. During
    /// the `k = log_2(n)` rounds of folding, it implicitly constructs the
    /// **challenge polynomial** `b(X)`.
    ///
    /// Note: The use of the challenge polynomial `b(X)` is a modification to the
    /// original IPA protocol that improves efficiency in recursive proof
    /// settings. It is inspired by the amortization strategy of [Recursive Proof
    /// Composition without a Trusted Setup](https://eprint.iacr.org/2019/1021.pdf),
    /// Section 3.2.
    ///
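    /// Each folding round halves the vectors: with `a = (a_lo, a_hi)`,
    /// `b = (b_lo, b_hi)`, `g = (g_lo, g_hi)` and round challenge `u`, the
    /// folded vectors are (sketch):
    /// ```text
    /// a' = a_lo + u^{-1} * a_hi
    /// b' = b_lo + u * b_hi
    /// g' = g_lo + u * g_hi
    /// ```
    /// so that after `k` rounds `b` collapses to the single scalar
    /// `b_poly(chals, evaluation_point)`.
    ///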
    /// # Panics
    ///
    /// Panics if IPA folding does not produce single elements after log rounds,
    /// or if the challenge inverse cannot be computed.
    #[allow(clippy::type_complexity)]
    #[allow(clippy::many_single_char_names)]
    #[allow(clippy::too_many_lines)]
    pub fn open<EFqSponge, RNG, D: EvaluationDomain<G::ScalarField>, const FULL_ROUNDS: usize>(
        &self,
        group_map: &G::Map,
        plnms: PolynomialsToCombine<G, D>,
        elm: &[G::ScalarField],
        polyscale: G::ScalarField,
        evalscale: G::ScalarField,
        mut sponge: EFqSponge,
        rng: &mut RNG,
    ) -> OpeningProof<G, FULL_ROUNDS>
    where
        EFqSponge: Clone + FqSponge<G::BaseField, G, G::ScalarField, FULL_ROUNDS>,
        RNG: RngCore + CryptoRng,
        G::BaseField: PrimeField,
        G: EndoCurve,
    {
        let (endo_q, endo_r) = endos::<G>();

        let rounds = math::ceil_log2(self.g.len());
        let padded_length = 1 << rounds;

        // TODO: Trim this to the degree of the largest polynomial
        // TODO: We do always suppose we have a power of 2 for the SRS in
        // practice. Therefore, padding equals zero, and this code can be
        // removed. Only a current test case uses a SRS with a non-power of 2.
        let padding = padded_length - self.g.len();
        let mut g = self.g.clone();
        g.extend(vec![G::zero(); padding]);

        // Combines polynomials roughly as follows: p(X) := ∑_i polyscale^i p_i(X)
        //
        // `blinding_factor` is the corresponding combination of the blinders
        // paired with the polynomials in `plnms`. In kimchi, these input
        // blinders are the polynomial commitment blinders, so they are often
        // `[G::ScalarField::one(); num_chunks]` or zeroes.
        let (p, blinding_factor) = combine_polys::<G, D>(plnms, polyscale, self.g.len());

        // The initial evaluation vector for polynomial commitment b_init is not
        // just the powers of a single point as in the original IPA
        // (1,ζ,ζ^2,...)
        //
        // but rather a vector of linearly combined powers with `evalscale` as
        // recombiner.
        //
        // b_init[j] = Σ_i evalscale^i elm_i^j
        //           = ζ^j + evalscale * ζ^j ω^j (in the specific case of challenges (ζ,ζω))
        //
        // So in our case b_init is the following vector:
        //    1 + evalscale
        //    ζ + evalscale * ζ ω
        //    ζ^2 + evalscale * (ζ ω)^2
        //    ζ^3 + evalscale * (ζ ω)^3
        //    ...
        let b_init = {
            // randomise/scale the eval powers
            let mut scale = G::ScalarField::one();
            let mut res: Vec<G::ScalarField> =
                (0..padded_length).map(|_| G::ScalarField::zero()).collect();
            for e in elm {
                for (i, t) in pows(padded_length, *e).iter().enumerate() {
                    res[i] += &(scale * t);
                }
                scale *= &evalscale;
            }
            res
        };

        // The combined polynomial p(X) paired against the combined evaluation
        // vector b_init, i.e.
        //   <p.coeffs, b_init> = Σ_i evalscale^i p(elm_i).
        let combined_inner_product = p
            .coeffs
            .iter()
            .zip(b_init.iter())
            .map(|(a, b)| *a * b)
            .fold(G::ScalarField::zero(), |acc, x| acc + x);

        // Usually, the prover sends `combined_inner_product` to the verifier,
        // so we should absorb `combined_inner_product`.
        // However, it is more efficient in the recursion circuit
        // to absorb a slightly modified version of it.
        // As a reminder, in a recursive setting, the challenges are given as a
        // public input and verified in the next iteration.
        // See the `shift_scalar` doc.
        sponge.absorb_fr(&[shift_scalar::<G>(combined_inner_product)]);

        // Generate another randomisation base U; our commitments will be w.r.t
        // bases {G_i},H,U.
        let u_base: G = {
            let t = sponge.challenge_fq();
            let (x, y) = group_map.to_group(t);
            G::of_coordinates(x, y)
        };

        let mut a = p.coeffs;
        assert!(padded_length >= a.len());
        a.extend(vec![G::ScalarField::zero(); padded_length - a.len()]);

        let mut b = b_init;

        let mut lr = vec![];

        let mut blinders = vec![];

        let mut chals = vec![];
        let mut chal_invs = vec![];

        // The main IPA folding loop that has log iterations.
        for _ in 0..rounds {
            let n = g.len() / 2;
            // Pedersen bases
            let (g_lo, g_hi) = (&g[0..n], &g[n..]);
            // Polynomial coefficients
            let (a_lo, a_hi) = (&a[0..n], &a[n..]);
            // Evaluation points
            let (b_lo, b_hi) = (&b[0..n], &b[n..]);

            // Blinders for L/R
            let rand_l = <G::ScalarField as UniformRand>::rand(rng);
            let rand_r = <G::ScalarField as UniformRand>::rand(rng);

            // Pedersen commitment to a_hi (under the bases g_lo), rand_l, and <a_hi, b_lo>
            let l = G::Group::msm_bigint(
                &[g_lo, &[self.h, u_base]].concat(),
                &[a_hi, &[rand_l, inner_prod(a_hi, b_lo)]]
                    .concat()
                    .iter()
                    .map(|x| x.into_bigint())
                    .collect::<Vec<_>>(),
            )
            .into_affine();

            let r = G::Group::msm_bigint(
                &[g_hi, &[self.h, u_base]].concat(),
                &[a_lo, &[rand_r, inner_prod(a_lo, b_hi)]]
                    .concat()
                    .iter()
                    .map(|x| x.into_bigint())
                    .collect::<Vec<_>>(),
            )
            .into_affine();

            lr.push((l, r));
            blinders.push((rand_l, rand_r));

            sponge.absorb_g(&[l]);
            sponge.absorb_g(&[r]);

            // Round #i challenges;
            // - not to be confused with "u_base"
            // - not to be confused with "u" as "polyscale"
            let u_pre = squeeze_prechallenge(&mut sponge);
            let u = u_pre.to_field(&endo_r);
            let u_inv = u.inverse().unwrap();

            chals.push(u);
            chal_invs.push(u_inv);

            // IPA-folding polynomial coefficients
            a = a_hi
                .par_iter()
                .zip(a_lo)
                .map(|(&hi, &lo)| {
                    // lo + u_inv * hi
                    let mut res = hi;
                    res *= u_inv;
                    res += &lo;
                    res
                })
                .collect();

            // IPA-folding evaluation points.
            // This folding implicitly constructs the challenge polynomial b(X):
            // after all rounds, b[0] = b_poly(chals, evaluation_point).
            b = b_lo
                .par_iter()
                .zip(b_hi)
                .map(|(&lo, &hi)| {
                    // lo + u * hi
                    let mut res = hi;
                    res *= u;
                    res += &lo;
                    res
                })
                .collect();

            // IPA-folding bases
            g = G::combine_one_endo(endo_r, endo_q, g_lo, g_hi, &u_pre);
        }

        assert!(
            g.len() == 1 && a.len() == 1 && b.len() == 1,
            "IPA commitment folding must produce single elements after log rounds"
        );
        let a0 = a[0];
        // b0 is the folded evaluation point, equal to b_poly(chals, evaluation_point).
        // Note: The folding of `b` (and this extraction) could be skipped in a
        // non-recursive setting where the verifier doesn't need to recompute
        // the evaluation from challenges using b_poly.
        let b0 = b[0];
        let g0 = g[0];

        // Compute r_prime, a folded blinder. It combines blinders on
        // each individual step of the IPA folding process together
        // with the final blinding_factor of the polynomial.
        //
        // r_prime := ∑_i (rand_l[i] * u[i]^{-1} + rand_r[i] * u[i])
        //            + blinding_factor
        //
        // where u is a vector of folding challenges, and rand_l/rand_r are
        // intermediate L/R blinders.
        let r_prime = blinders
            .iter()
            .zip(chals.iter().zip(chal_invs.iter()))
            .map(|((rand_l, rand_r), (u, u_inv))| ((*rand_l) * u_inv) + (*rand_r * u))
            .fold(blinding_factor, |acc, x| acc + x);

        let d = <G::ScalarField as UniformRand>::rand(rng);
        let r_delta = <G::ScalarField as UniformRand>::rand(rng);

        // Compute delta, the commitment
        //   delta = [d] G0 + [b0 * d] U_base + [r_delta] H
        // (as a group element, in additive notation)
        let delta = ((g0.into_group() + (u_base.mul(b0))).into_affine().mul(d)
            + self.h.mul(r_delta))
        .into_affine();

        sponge.absorb_g(&[delta]);
        let c = ScalarChallenge::new(sponge.challenge()).to_field(&endo_r);

        // Schnorr-like responses showing the knowledge of r_prime and a0.
        let z1 = a0 * c + d;
        let z2 = r_prime * c + r_delta;
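        // These satisfy the verification equation checked in `verify`:
        //   c * Q + delta == z1 * (G0 + b0 * U_base) + z2 * H
        // where Q is the accumulated commitment reconstructed from `lr`.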

        OpeningProof {
            delta,
            lr,
            z1,
            z2,
            sg: g0,
        }
    }

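    /// Computes commitments to the (normalised) Lagrange basis polynomials of
    /// `domain`, chunked by the SRS size.
    ///
    /// In short (the full derivation is in the comments inside the function
    /// body): the commitment to the monomial `x^i` is `self.g[i]`, and the
    /// scheme is additively homomorphic, so an inverse FFT applied to the
    /// vector of SRS points yields the commitments to the Lagrange
    /// polynomials.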
    fn lagrange_basis(&self, domain: D<G::ScalarField>) -> Vec<PolyComm<G>> {
        let n = domain.size();

        // Let V be a vector space over the field F.
        //
        // Given
        // - a domain [ 1, w, w^2, ..., w^{n - 1} ]
        // - a vector v := [ v_0, ..., v_{n - 1} ] in V^n
        //
        // the FFT algorithm computes the matrix application
        //
        // u = M(w) * v
        //
        // where
        // M(w) =
        //   1 1       1           ... 1
        //   1 w       w^2         ... w^{n-1}
        //   ...
        //   1 w^{n-1} (w^2)^{n-1} ... (w^{n-1})^{n-1}
        //
        // The IFFT algorithm computes
        //
        // v = M(w)^{-1} * u
        //
        // Let's see how we can use this algorithm to compute the lagrange basis
        // commitments.
        //
        // Let V be the vector space F[x] of polynomials in x over F.
        // Let v in V be the vector [ L_0, ..., L_{n - 1} ] where L_i is the i^{th}
        // normalized Lagrange polynomial (where L_i(w^j) = j == i ? 1 : 0).
        //
        // Consider the rows of M(w) * v. Let me write out the matrix and vector
        // so you can see more easily.
        //
        //   | 1 1       1           ... 1               |   | L_0     |
        //   | 1 w       w^2         ... w^{n-1}         | * | L_1     |
        //   | ...                                       |   | ...     |
        //   | 1 w^{n-1} (w^2)^{n-1} ... (w^{n-1})^{n-1} |   | L_{n-1} |
        //
        // The 0th row is L_0 + L_1 + ... + L_{n - 1}. So, it's the polynomial
        // that has the value 1 on every element of the domain.
        // In other words, it's the polynomial 1.
        //
        // The 1st row is L_0 + w L_1 + ... + w^{n - 1} L_{n - 1}. So, it's the
        // polynomial which has value w^i on w^i.
        // In other words, it's the polynomial x.
        //
        // In general, you can see that row i is in fact the polynomial x^i.
        //
        // Thus, M(w) * v is the vector u, where u = [ 1, x, x^2, ..., x^{n-1} ]
        //
        // Therefore, the IFFT algorithm, when applied to the vector u (the
        // standard monomial basis) will yield the vector v of the (normalized)
        // Lagrange polynomials.
        //
        // Now, because the polynomial commitment scheme is additively
        // homomorphic, and because the commitment to the polynomial x^i is just
        // self.g[i], we can obtain commitments to the normalized Lagrange
        // polynomials by applying IFFT to the vector self.g[0..n].
        //
        //
        // Further still, we can do the same trick for 'chunked' polynomials.
        //
        // Recall that a chunked polynomial is some f of degree k*n - 1 with
        // f(x) = f_0(x) + x^n f_1(x) + ... + x^{(k-1) n} f_{k-1}(x)
        // where each f_i has degree n-1.
        //
        // In the above, if we set u = [ 1, x, x^2, ..., x^{n-1}, 0, 0, ..., 0 ]
        // then we effectively 'zero out' any polynomial terms higher than
        // x^{n-1}, leaving us with the 'partial Lagrange polynomials' that
        // contribute to f_0.
        //
        // Similarly, u = [ 0, 0, ..., 0, 1, x, x^2, ..., x^{n-1}, 0, 0, ..., 0 ]
        // with n leading zeros 'zeroes out' all terms except the 'partial
        // Lagrange polynomials' that contribute to f_1, and likewise for each
        // f_i.
        //
        // By computing each of these, and recollecting the terms as a vector of
        // polynomial commitments, we obtain a chunked commitment to the L_i
        // polynomials.
        let srs_size = self.g.len();
        let num_elems = n.div_ceil(srs_size);
        let mut chunks = Vec::with_capacity(num_elems);

        // For each chunk
        for i in 0..num_elems {
            // Initialize the vector with zero curve points
            let mut lg: Vec<<G as AffineRepr>::Group> = vec![<G as AffineRepr>::Group::zero(); n];
            // Overwrite the terms corresponding to that chunk with the SRS
            // curve points
            let start_offset = i * srs_size;
            let num_terms = min((i + 1) * srs_size, n) - start_offset;
            for j in 0..num_terms {
                lg[start_offset + j] = self.g[j].into_group();
            }
            // Apply the IFFT
            domain.ifft_in_place(&mut lg);
            // Append the 'partial Lagrange polynomials' to the vector of chunks
            chunks.push(<G as AffineRepr>::Group::normalize_batch(lg.as_mut_slice()));
        }

        (0..n)
            .map(|i| PolyComm {
                chunks: chunks.iter().map(|v| v[i]).collect(),
            })
            .collect()
    }
}

#[serde_as]
#[derive(Clone, Debug, Serialize, Deserialize, Default, PartialEq, Eq)]
#[serde(bound = "G: ark_serialize::CanonicalDeserialize + ark_serialize::CanonicalSerialize")]
pub struct OpeningProof<G: AffineRepr, const FULL_ROUNDS: usize> {
    /// Vector of rounds of L & R commitments
    #[serde_as(as = "Vec<(o1_utils::serialization::SerdeAs, o1_utils::serialization::SerdeAs)>")]
    pub lr: Vec<(G, G)>,
    #[serde_as(as = "o1_utils::serialization::SerdeAs")]
    pub delta: G,
    #[serde_as(as = "o1_utils::serialization::SerdeAs")]
    pub z1: G::ScalarField,
    #[serde_as(as = "o1_utils::serialization::SerdeAs")]
    pub z2: G::ScalarField,
    /// A final folded commitment base
    #[serde_as(as = "o1_utils::serialization::SerdeAs")]
    pub sg: G,
}
1053
1054impl<
1055 BaseField: PrimeField,
1056 G: AffineRepr<BaseField = BaseField> + CommitmentCurve + EndoCurve,
1057 const FULL_ROUNDS: usize,
1058 > crate::OpenProof<G, FULL_ROUNDS> for OpeningProof<G, FULL_ROUNDS>
1059{
1060 type SRS = SRS<G>;
1061
1062 fn open<EFqSponge, RNG, D: EvaluationDomain<<G as AffineRepr>::ScalarField>>(
1063 srs: &Self::SRS,
1064 group_map: &<G as CommitmentCurve>::Map,
1065 plnms: PolynomialsToCombine<G, D>,
1066 elm: &[<G as AffineRepr>::ScalarField],
1067 polyscale: <G as AffineRepr>::ScalarField,
1068 evalscale: <G as AffineRepr>::ScalarField,
1069 sponge: EFqSponge,
1070 rng: &mut RNG,
1071 ) -> Self
1072 where
1073 EFqSponge: Clone
1074 + FqSponge<<G as AffineRepr>::BaseField, G, <G as AffineRepr>::ScalarField, FULL_ROUNDS>,
1075 RNG: RngCore + CryptoRng,
1076 {
1077 srs.open(group_map, plnms, elm, polyscale, evalscale, sponge, rng)
1078 }
1079
1080 fn verify<EFqSponge, RNG>(
1081 srs: &Self::SRS,
1082 group_map: &G::Map,
1083 batch: &mut [BatchEvaluationProof<G, EFqSponge, Self, FULL_ROUNDS>],
1084 rng: &mut RNG,
1085 ) -> bool
1086 where
1087 EFqSponge:
1088 FqSponge<<G as AffineRepr>::BaseField, G, <G as AffineRepr>::ScalarField, FULL_ROUNDS>,
1089 RNG: RngCore + CryptoRng,
1090 {
1091 srs.verify(group_map, batch, rng)
1092 }
1093}

/// Commitment round challenges (endo mapped) and their inverses.
pub struct Challenges<F> {
    pub chal: Vec<F>,
    pub chal_inv: Vec<F>,
}

impl<G: AffineRepr, const FULL_ROUNDS: usize> OpeningProof<G, FULL_ROUNDS> {
    /// Computes a log-sized vector of scalar challenges for
    /// recombining elements inside the IPA.
    pub fn prechallenges<EFqSponge: FqSponge<G::BaseField, G, G::ScalarField, FULL_ROUNDS>>(
        &self,
        sponge: &mut EFqSponge,
    ) -> Vec<ScalarChallenge<G::ScalarField>> {
        let _t = sponge.challenge_fq();
        self.lr
            .iter()
            .map(|(l, r)| {
                sponge.absorb_g(&[*l]);
                sponge.absorb_g(&[*r]);
                squeeze_prechallenge(sponge)
            })
            .collect()
    }

    /// Same as `prechallenges`, but maps scalar challenges using the provided
    /// endomorphism, and computes their inverses.
    pub fn challenges<EFqSponge: FqSponge<G::BaseField, G, G::ScalarField, FULL_ROUNDS>>(
        &self,
        endo_r: &G::ScalarField,
        sponge: &mut EFqSponge,
    ) -> Challenges<G::ScalarField> {
        let chal: Vec<_> = self
            .lr
            .iter()
            .map(|(l, r)| {
                sponge.absorb_g(&[*l]);
                sponge.absorb_g(&[*r]);
                squeeze_challenge(endo_r, sponge)
            })
            .collect();

        let chal_inv = {
            let mut cs = chal.clone();
            ark_ff::batch_inversion(&mut cs);
            cs
        };

        Challenges { chal, chal_inv }
    }
}

#[cfg(feature = "ocaml_types")]
#[allow(non_local_definitions)]
pub mod caml {
    use super::OpeningProof;
    use ark_ec::AffineRepr;
    use ocaml;

    #[derive(ocaml::IntoValue, ocaml::FromValue, ocaml_gen::Struct)]
    pub struct CamlOpeningProof<G, F> {
        /// Vector of rounds of L & R commitments
        pub lr: Vec<(G, G)>,
        pub delta: G,
        pub z1: F,
        pub z2: F,
        pub sg: G,
    }

    impl<G, CamlF, CamlG, const FULL_ROUNDS: usize> From<OpeningProof<G, FULL_ROUNDS>>
        for CamlOpeningProof<CamlG, CamlF>
    where
        G: AffineRepr,
        CamlG: From<G>,
        CamlF: From<G::ScalarField>,
    {
        fn from(opening_proof: OpeningProof<G, FULL_ROUNDS>) -> Self {
            Self {
                lr: opening_proof
                    .lr
                    .into_iter()
                    .map(|(g1, g2)| (CamlG::from(g1), CamlG::from(g2)))
                    .collect(),
                delta: CamlG::from(opening_proof.delta),
                z1: opening_proof.z1.into(),
                z2: opening_proof.z2.into(),
                sg: CamlG::from(opening_proof.sg),
            }
        }
    }

    impl<G, CamlF, CamlG, const FULL_ROUNDS: usize> From<CamlOpeningProof<CamlG, CamlF>>
        for OpeningProof<G, FULL_ROUNDS>
    where
        G: AffineRepr,
        CamlG: Into<G>,
        CamlF: Into<G::ScalarField>,
    {
        fn from(caml: CamlOpeningProof<CamlG, CamlF>) -> Self {
            Self {
                lr: caml
                    .lr
                    .into_iter()
                    .map(|(g1, g2)| (g1.into(), g2.into()))
                    .collect(),
                delta: caml.delta.into(),
                z1: caml.z1.into(),
                z2: caml.z2.into(),
                sg: caml.sg.into(),
            }
        }
    }
1206}