// File: poly_commitment/ipa.rs
1//! IPA polynomial commitment scheme.
2//!
3//! This module contains the implementation of the polynomial commitment scheme
4//! called the Inner Product Argument (IPA) as described in [Efficient
5//! Zero-Knowledge Arguments for Arithmetic Circuits in the Discrete Log
6//! Setting](https://eprint.iacr.org/2016/263).
7
8#[cfg(not(feature = "std"))]
9use crate::collections::HashMap;
10#[cfg(feature = "std")]
11use crate::hash_map_cache::HashMapCache;
12#[cfg(feature = "std")]
13use crate::precomputed_srs::TestSRS;
14use crate::{
15    commitment::{
16        b_poly, b_poly_coefficients, combine_commitments, shift_scalar, squeeze_challenge,
17        squeeze_prechallenge, BatchEvaluationProof, BlindedCommitment, CommitmentCurve, EndoCurve,
18        PolyComm,
19    },
20    error::CommitmentError,
21    SRS as SRSTrait,
22};
23#[cfg(feature = "std")]
24use crate::{utils::combine_polys, PolynomialsToCombine};
25use alloc::{vec, vec::Vec};
26use ark_ec::{AffineRepr, CurveGroup, VariableBaseMSM};
27#[cfg(feature = "std")]
28use ark_ff::{BigInteger, Field};
29use ark_ff::{One, PrimeField, UniformRand, Zero};
30#[cfg(feature = "std")]
31use ark_poly::{univariate::DensePolynomial, Evaluations};
32use ark_poly::{EvaluationDomain, Radix2EvaluationDomain as D};
33use ark_serialize::{CanonicalDeserialize, CanonicalSerialize};
34#[cfg(feature = "std")]
35use blake2::{Blake2b512, Digest};
36use core::{
37    cmp::min,
38    ops::{AddAssign, Deref},
39};
40use groupmap::GroupMap;
41use mina_poseidon::{sponge::ScalarChallenge, FqSponge};
42#[cfg(feature = "std")]
43use o1_utils::field_helpers::{inner_prod, pows};
44use o1_utils::math;
45use rand_core::{CryptoRng, RngCore};
46#[cfg(feature = "parallel")]
47use rayon::prelude::*;
48use serde::{Deserialize, Serialize};
49use serde_with::serde_as;
50#[cfg(feature = "std")]
51use zeroize::Zeroize;
52
/// A structured reference string (SRS) for the IPA polynomial commitment
/// scheme: the fixed commitment bases `g`, the blinding base `h`, and a
/// cache of Lagrange-basis commitments (never serialized).
#[serde_as]
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
#[serde(bound = "G: CanonicalDeserialize + CanonicalSerialize")]
pub struct SRS<G> {
    /// The vector of group elements for committing to polynomials in
    /// coefficient form.
    #[serde_as(as = "Vec<o1_utils::serialization::SerdeAs>")]
    pub g: Vec<G>,

    /// A group element used for blinding commitments
    #[serde_as(as = "o1_utils::serialization::SerdeAs")]
    pub h: G,

    // TODO: the following field should be separated, as they are optimization
    // values
    /// Commitments to Lagrange bases, per domain size. This is a derived
    /// cache (recomputable from `g`), hence `#[serde(skip)]` and its
    /// exclusion from `PartialEq`.
    #[cfg(feature = "std")]
    #[serde(skip)]
    lagrange_bases: HashMapCache<usize, Vec<PolyComm<G>>>,
    #[cfg(not(feature = "std"))]
    #[serde(skip)]
    lagrange_bases: LagrangeCache<G>,
}
76
/// Single-threaded Lagrange-basis cache used when `std` (and therefore
/// `HashMapCache`) is unavailable: a shared, interiorly-mutable map from
/// domain size to the cached basis commitments.
#[cfg(not(feature = "std"))]
type LagrangeCache<G> =
    alloc::rc::Rc<core::cell::RefCell<HashMap<usize, alloc::rc::Rc<Vec<PolyComm<G>>>>>>;
80
/// Cache-lookup extension mirroring `HashMapCache::get_or_generate` for the
/// no-`std` `LagrangeCache`.
#[cfg(not(feature = "std"))]
trait GetOrGenExt<G> {
    /// Returns the cached value for `key`, invoking `generator` to populate
    /// the cache on a miss.
    fn get_or_generate<F: FnOnce() -> Vec<PolyComm<G>>>(
        &self,
        key: usize,
        generator: F,
    ) -> alloc::rc::Rc<Vec<PolyComm<G>>>;
}
89
90#[cfg(not(feature = "std"))]
91impl<G> GetOrGenExt<G> for LagrangeCache<G> {
92    fn get_or_generate<F: FnOnce() -> Vec<PolyComm<G>>>(
93        &self,
94        key: usize,
95        generator: F,
96    ) -> alloc::rc::Rc<Vec<PolyComm<G>>> {
97        let mut map = self.borrow_mut();
98        let entry = map
99            .entry(key)
100            .or_insert_with(|| alloc::rc::Rc::new(generator()));
101        alloc::rc::Rc::clone(entry)
102    }
103}
104
// SAFETY: In no-std context there is no threading. Rc<RefCell<...>> is the only
// non-Send/Sync field, and it is only accessed from a single thread.
// NOTE(review): this argument relies on every no-std consumer being
// single-threaded — confirm; these impls are unsound otherwise.
#[cfg(not(feature = "std"))]
#[allow(unsafe_code, clippy::non_send_fields_in_send_ty)]
unsafe impl<G: Send> Send for SRS<G> {}
// SAFETY: same single-threaded no-std argument as the `Send` impl directly above.
#[cfg(not(feature = "std"))]
#[allow(unsafe_code)]
unsafe impl<G: Sync> Sync for SRS<G> {}
113
/// Converts a deserialized [`TestSRS`] into a live [`SRS`], seeding the
/// Lagrange-basis cache from the test fixture's precomputed bases.
///
/// Note: the whole impl is gated on `std`, so the previous
/// `#[cfg(not(feature = "std"))]` field initializer was unreachable dead
/// code and has been removed.
#[cfg(feature = "std")]
impl<G> From<TestSRS<G>> for SRS<G> {
    fn from(value: TestSRS<G>) -> Self {
        Self {
            g: value.g,
            h: value.h,
            lagrange_bases: HashMapCache::new_from_hashmap(value.lagrange_bases),
        }
    }
}
127
/// Converts a live [`SRS`] into a serializable [`TestSRS`] snapshot.
#[cfg(feature = "std")]
impl<G: Clone> From<SRS<G>> for TestSRS<G> {
    fn from(value: SRS<G>) -> Self {
        // Snapshot the cached Lagrange bases before `g` and `h` are moved out.
        let bases = value.lagrange_bases().clone();
        Self {
            g: value.g,
            h: value.h,
            lagrange_bases: bases.into(),
        }
    }
}
140
impl<G> SRS<G> {
    /// Creates an SRS from commitment bases `g` and blinding base `h`, with
    /// an empty Lagrange-basis cache.
    #[cfg(feature = "std")]
    pub fn new(g: Vec<G>, h: G) -> Self {
        Self {
            g,
            h,
            lagrange_bases: HashMapCache::new(),
        }
    }

    /// Read-only access to the Lagrange-basis cache.
    #[cfg(feature = "std")]
    pub const fn lagrange_bases(&self) -> &HashMapCache<usize, Vec<PolyComm<G>>> {
        &self.lagrange_bases
    }

    /// Read-only access to the Lagrange-basis cache (no-`std` variant).
    #[cfg(not(feature = "std"))]
    pub const fn lagrange_bases(&self) -> &LagrangeCache<G> {
        &self.lagrange_bases
    }
}
161
impl<G> PartialEq for SRS<G>
where
    G: PartialEq,
{
    // NOTE: `lagrange_bases` is deliberately excluded from equality — it is a
    // derived cache, so two SRSs with the same `g` and `h` compare equal.
    fn eq(&self, other: &Self) -> bool {
        self.g == other.g && self.h == other.h
    }
}
170
171/// Computes the endomorphism coefficients (ξ, λ) for a curve.
172///
173/// For curves of the form y² = x³ + b (like Pallas and Vesta), there exists
174/// an efficient endomorphism φ defined by:
175///
176///   φ(x, y) = (ξ · x, y)
177///
178/// where ξ is a primitive cube root of unity in the base field (ξ³ = 1, ξ ≠ 1).
179/// This works because (ξx)³ = ξ³x³ = x³, so the point remains on the curve.
180///
181/// This endomorphism corresponds to scalar multiplication by λ:
182///
183///   φ(P) = \[λ\]P
184///
185/// where λ is a primitive cube root of unity in the scalar field.
186///
187/// # Returns
188///
189/// A tuple (`endo_q`, `endo_r`) where:
190/// - `endo_q` (ξ): cube root of unity in the base field `F_q`, used to compute φ(P)
191/// - `endo_r` (λ): the corresponding scalar in `F_r` such that φ(P) = \[λ\]P
192///
193/// # Mathematical Background
194///
195/// The cube root is computed as ξ = g^((p-1)/3) where g is a generator of `F_p*`.
196/// By Fermat's Little Theorem, ξ³ = g^(p-1) = 1.
197///
198/// Since there are two primitive cube roots of unity (ξ and ξ²), the function
199/// verifies which one corresponds to the endomorphism by checking:
200///
201///   \[`potential_λ`\]G == φ(G)
202///
203/// If not, it uses λ = `potential_λ²` instead.
204///
205/// # Panics
206///
207/// Panics if the generator point coordinates cannot be extracted.
208///
209/// # References
210///
211/// - Halo paper, Section 6.2: <https://eprint.iacr.org/2019/1021>
212/// - GLV method for fast scalar multiplication
213#[must_use]
214pub fn endos<G: CommitmentCurve>() -> (G::BaseField, G::ScalarField)
215where
216    G::BaseField: PrimeField,
217{
218    let endo_q: G::BaseField = mina_poseidon::sponge::endo_coefficient();
219    let endo_r = {
220        let potential_endo_r: G::ScalarField = mina_poseidon::sponge::endo_coefficient();
221        let t = G::generator();
222        let (x, y) = t.to_coordinates().unwrap();
223        let phi_t = G::of_coordinates(x * endo_q, y);
224        if t.mul(potential_endo_r) == phi_t.into_group() {
225            potential_endo_r
226        } else {
227            potential_endo_r * potential_endo_r
228        }
229    };
230    (endo_q, endo_r)
231}
232
/// Deterministically maps `random_bytes` to a curve point.
///
/// Consumes `N = 31` bytes per base-prime-field element (31 bytes always fit
/// strictly below a ~255-bit modulus), one element per extension degree,
/// assembles them into a base-field element, then applies the group map and
/// clears the cofactor.
///
/// NOTE(review): the bit packing below extracts bits LSB-first within each
/// byte and then feeds the array to `from_bits_be`. This exact (unusual)
/// encoding determines which SRS points are produced, so it is
/// consensus-critical — do not "fix" the ordering.
#[cfg(feature = "std")]
fn point_of_random_bytes<G: CommitmentCurve>(map: &G::Map, random_bytes: &[u8]) -> G
where
    G::BaseField: Field,
{
    // packing in bit-representation
    const N: usize = 31;
    #[allow(clippy::cast_possible_truncation)]
    let extension_degree = G::BaseField::extension_degree() as usize;

    let mut base_fields = Vec::with_capacity(N * extension_degree);

    for base_count in 0..extension_degree {
        let mut bits = [false; 8 * N];
        // Each extension limb consumes its own disjoint N-byte window.
        let offset = base_count * N;
        for i in 0..N {
            for j in 0..8 {
                bits[8 * i + j] = (random_bytes[offset + i] >> j) & 1 == 1;
            }
        }

        let n =
            <<G::BaseField as Field>::BasePrimeField as PrimeField>::BigInt::from_bits_be(&bits);
        // 31 bytes are below the modulus, so the conversion cannot fail.
        let t = <<G::BaseField as Field>::BasePrimeField as PrimeField>::from_bigint(n)
            .expect("packing code has a bug");
        base_fields.push(t);
    }

    let t = G::BaseField::from_base_prime_field_elems(base_fields).unwrap();

    // Map the field element to the curve and clear the cofactor so the
    // result lies in the prime-order subgroup.
    let (x, y) = map.to_group(t);
    G::of_coordinates(x, y).mul_by_cofactor()
}
266
/// Additional methods for the SRS structure
impl<G: CommitmentCurve> SRS<G> {
    /// Verifies a batch of polynomial commitment opening proofs.
    ///
    /// This IPA verification method is primarily designed for integration into
    /// recursive proof systems like Pickles. In recursive proofs, IPA verification
    /// is deferred by storing accumulators (`RecursionChallenge`)
    /// rather than verifying immediately in-circuit. This method performs the final
    /// out-of-circuit verification of those accumulators.
    ///
    /// The function reconstructs the **challenge polynomial** `b(X)` from the IPA
    /// challenges and uses it to verify the opening proofs. The challenge polynomial
    /// is defined as:
    /// ```text
    /// b(X) = prod_{i=0}^{k-1} (1 + u_{k-i} * X^{2^i})
    /// ```
    /// where `u_1, ..., u_k` are the challenges from the `k` rounds of the IPA protocol.
    /// See Section 3.2 of the [Halo paper](https://eprint.iacr.org/2019/1021.pdf).
    ///
    /// The verification reconstructs `b(X)` in two forms:
    /// - **Evaluation form**: `b_poly(&chal, x)` computes `b(x)` in `O(k)` operations
    /// - **Coefficient form**: `b_poly_coefficients(&chal)` returns the `2^k` coefficients
    ///   for the MSM `<s, G>` that verifies the accumulated commitment
    ///
    /// Note: The challenge polynomial reconstruction is specifically needed for recursive
    /// proof verification. For standalone (non-recursive) IPA verification, a simpler
    /// approach without `b(X)` reconstruction could be used.
    ///
    /// # Arguments
    ///
    /// * `group_map` - map used to derive the point `U` from sponge output
    /// * `batch` - opening proofs plus their sponges and evaluation data;
    ///   taken mutably because each sponge absorbs further values here
    /// * `rng` - cryptographic randomness for the batching scalars
    ///
    /// # Panics
    ///
    /// Panics if the number of scalars does not match the number of points.
    ///
    /// Returns `true` if verification succeeds, `false` otherwise.
    #[allow(clippy::too_many_lines)]
    pub fn verify<EFqSponge, RNG, const FULL_ROUNDS: usize>(
        &self,
        group_map: &G::Map,
        batch: &mut [BatchEvaluationProof<
            G,
            EFqSponge,
            OpeningProof<G, FULL_ROUNDS>,
            FULL_ROUNDS,
        >],
        rng: &mut RNG,
    ) -> bool
    where
        EFqSponge: FqSponge<G::BaseField, G, G::ScalarField, FULL_ROUNDS>,
        RNG: RngCore + CryptoRng,
        G::BaseField: PrimeField,
    {
        // Verifier checks for all i,
        // c_i Q_i + delta_i = z1_i (G_i + b_i U_i) + z2_i H
        //
        // if we sample evalscale at random, it suffices to check
        //
        // 0 == sum_i evalscale^i (c_i Q_i + delta_i - ( z1_i (G_i + b_i U_i) + z2_i H ))
        //
        // and because each G_i is a multiexp on the same array self.g, we
        // can batch the multiexp across proofs.
        //
        // So for each proof in the batch, we add onto our big multiexp the
        // following terms
        // evalscale^i c_i Q_i
        // evalscale^i delta_i
        // - (evalscale^i z1_i) G_i
        // - (evalscale^i z2_i) H
        // - (evalscale^i z1_i b_i) U_i

        // We also check that the sg component of the proof is equal to the
        // polynomial commitment to the "s" array

        let nonzero_length = self.g.len();

        let max_rounds = math::ceil_log2(nonzero_length);

        let padded_length = 1 << max_rounds;

        let (_, endo_r) = endos::<G>();

        // TODO: This will need adjusting
        let padding = padded_length - nonzero_length;
        // Layout: points[0] is H, points[1..=padded_length] are the (padded)
        // SRS bases; per-proof terms are appended below with matching scalars.
        let mut points = vec![self.h];
        points.extend(self.g.clone());
        points.extend(vec![G::zero(); padding]);

        let mut scalars = vec![G::ScalarField::zero(); padded_length + 1];
        assert_eq!(scalars.len(), points.len());

        // sample randomiser to scale the proofs with
        let rand_base = G::ScalarField::rand(rng);
        let sg_rand_base = G::ScalarField::rand(rng);

        let mut rand_base_i = G::ScalarField::one();
        let mut sg_rand_base_i = G::ScalarField::one();

        for BatchEvaluationProof {
            sponge,
            evaluation_points,
            polyscale,
            evalscale,
            evaluations,
            opening,
            combined_inner_product,
        } in batch.iter_mut()
        {
            sponge.absorb_fr(&[shift_scalar::<G>(*combined_inner_product)]);

            // Derive the auxiliary point U from the sponge via the group map.
            let u_base: G = {
                let t = sponge.challenge_fq();
                let (x, y) = group_map.to_group(t);
                G::of_coordinates(x, y)
            };

            // Re-derive the per-round IPA challenges u_j and their inverses
            // from the transcript.
            let Challenges { chal, chal_inv } = opening.challenges::<EFqSponge>(&endo_r, sponge);

            sponge.absorb_g(&[opening.delta]);
            let c = ScalarChallenge::new(sponge.challenge()).to_field(&endo_r);

            // Evaluate the challenge polynomial b(X) at each evaluation point and combine.
            // This computes: b0 = sum_i evalscale^i * b(evaluation_point[i])
            // where b(X) = prod_{j=0}^{k-1} (1 + u_{k-j} * X^{2^j}) is the challenge polynomial
            // reconstructed from the IPA challenges `chal`.
            let b0 = {
                let mut scale = G::ScalarField::one();
                let mut res = G::ScalarField::zero();
                for &e in evaluation_points.iter() {
                    let term = b_poly(&chal, e);
                    res += &(scale * term);
                    scale *= *evalscale;
                }
                res
            };

            // Compute the 2^k coefficients of the challenge polynomial b(X).
            // These are used in the MSM <s, G> to verify the accumulated commitment.
            let s = b_poly_coefficients(&chal);

            let neg_rand_base_i = -rand_base_i;

            // TERM
            // - rand_base_i z1 G
            //
            // we also add -sg_rand_base_i * G to check correctness of sg.
            points.push(opening.sg);
            scalars.push(neg_rand_base_i * opening.z1 - sg_rand_base_i);

            // Here we add
            // sg_rand_base_i * ( < s, self.g > )
            // =
            // < sg_rand_base_i s, self.g >
            //
            // to check correctness of the sg component.
            {
                let terms: Vec<_> = o1_utils::cfg_iter!(s).map(|s| sg_rand_base_i * s).collect();

                // Offset by 1: scalars[0] belongs to H, the bases start at 1.
                for (i, term) in terms.iter().enumerate() {
                    scalars[i + 1] += term;
                }
            }

            // TERM
            // - rand_base_i * z2 * H
            scalars[0] -= &(rand_base_i * opening.z2);

            // TERM
            // -rand_base_i * (z1 * b0 * U)
            scalars.push(neg_rand_base_i * (opening.z1 * b0));
            points.push(u_base);

            // TERM
            // rand_base_i c_i Q_i
            // = rand_base_i c_i
            //   (sum_j (chal_invs[j] L_j + chals[j] R_j) + P_prime)
            // where P_prime = combined commitment + combined_inner_product * U
            let rand_base_i_c_i = c * rand_base_i;
            for ((l, r), (u_inv, u)) in opening.lr.iter().zip(chal_inv.iter().zip(chal.iter())) {
                points.push(*l);
                scalars.push(rand_base_i_c_i * u_inv);

                points.push(*r);
                scalars.push(rand_base_i_c_i * u);
            }

            // TERM
            // sum_j evalscale^j (sum_i polyscale^i f_i) (elm_j)
            // == sum_j sum_i evalscale^j polyscale^i f_i(elm_j)
            // == sum_i polyscale^i sum_j evalscale^j f_i(elm_j)
            combine_commitments(
                evaluations,
                &mut scalars,
                &mut points,
                *polyscale,
                rand_base_i_c_i,
            );

            scalars.push(rand_base_i_c_i * *combined_inner_product);
            points.push(u_base);

            scalars.push(rand_base_i);
            points.push(opening.delta);

            // Advance the per-proof batching randomisers.
            rand_base_i *= &rand_base;
            sg_rand_base_i *= &sg_rand_base;
        }

        // Verify the equation in two chunks, which is optimal for our SRS size.
        // (see the comment to the `benchmark_msm_parallel_vesta` MSM benchmark)
        let msm_res = {
            #[cfg(feature = "parallel")]
            {
                // points.len() >= padded_length + 1 >= 2, so chunk_size >= 1;
                // an odd length yields a trailing singleton chunk, which the
                // reduce below still sums correctly.
                let chunk_size = points.len() / 2;
                points
                    .into_par_iter()
                    .chunks(chunk_size)
                    .zip(scalars.into_par_iter().chunks(chunk_size))
                    .map(|(bases, coeffs)| {
                        let coeffs_bigint = coeffs
                            .into_iter()
                            .map(ark_ff::PrimeField::into_bigint)
                            .collect::<Vec<_>>();
                        G::Group::msm_bigint(&bases, &coeffs_bigint)
                    })
                    .reduce(G::Group::zero, |mut l, r| {
                        l += r;
                        l
                    })
            }
            #[cfg(not(feature = "parallel"))]
            {
                let scalars_bigint: Vec<_> = scalars.iter().map(|x| x.into_bigint()).collect();
                G::Group::msm_bigint(&points, &scalars_bigint)
            }
        };

        // The batched equation holds iff the full MSM collapses to zero.
        msm_res == G::Group::zero()
    }
}
504
#[cfg(feature = "std")]
impl<G: CommitmentCurve> SRS<G> {
    /// Builds a trusted-setup SRS for circuits with up to `depth` rows,
    /// where `g[i] = x^i · G` for the toxic-waste scalar `x`.
    ///
    /// # Security
    ///
    /// The internal accumulator `power` is zeroized before returning.
    /// The caller must ensure that `x` is securely zeroized after use;
    /// leaking `x` compromises the soundness of the proof system.
    pub fn create_trusted_setup_with_toxic_waste(x: G::ScalarField, depth: usize) -> Self {
        let group_map = G::Map::setup();

        // Accumulate successive powers of `x` applied to the generator.
        let mut power = G::ScalarField::one();
        let mut g = Vec::with_capacity(depth);
        for _ in 0..depth {
            g.push(G::generator().mul(power).into_affine());
            power *= x;
        }

        // Scrub the last accumulated value derived from the toxic waste.
        power.zeroize();

        // Derive the blinding base from a fixed domain-separated hash.
        let h = {
            let mut hasher = Blake2b512::new();
            hasher.update(b"srs_misc");
            // FIXME: kept for retro-compatibility with a previous version
            // that used a list initialisation; not strictly necessary.
            hasher.update(0_u32.to_be_bytes());
            point_of_random_bytes(&group_map, &hasher.finalize())
        };

        Self {
            g,
            h,
            lagrange_bases: HashMapCache::new(),
        }
    }
}
547
#[cfg(feature = "parallel")]
impl<G: CommitmentCurve> SRS<G>
where
    <G as CommitmentCurve>::Map: Sync,
    G::BaseField: PrimeField,
{
    /// Creates an SRS for circuits with up to `depth` rows, deriving each
    /// basis point deterministically from the hash of its index, with the
    /// points computed in parallel.
    ///
    /// # Panics
    ///
    /// Panics if `depth` exceeds `u32::MAX`.
    #[must_use]
    pub fn create_parallel(depth: usize) -> Self {
        let group_map = G::Map::setup();

        let g: Vec<_> = (0..depth)
            .into_par_iter()
            .map(|i| {
                #[allow(clippy::cast_possible_truncation)]
                let index = i as u32;
                let mut hasher = Blake2b512::new();
                hasher.update(index.to_be_bytes());
                point_of_random_bytes(&group_map, &hasher.finalize())
            })
            .collect();

        // Derive the blinding base from a fixed domain-separated hash.
        let h = {
            let mut hasher = Blake2b512::new();
            hasher.update(b"srs_misc");
            // FIXME: kept for retro-compatibility with a previous version
            // that used a list initialisation; not strictly necessary.
            hasher.update(0_u32.to_be_bytes());
            point_of_random_bytes(&group_map, &hasher.finalize())
        };

        Self {
            g,
            h,
            lagrange_bases: HashMapCache::new(),
        }
    }
}
591
impl<G> SRSTrait<G> for SRS<G>
where
    G: CommitmentCurve,
{
    /// The maximum polynomial degree that can be committed to
    fn max_poly_size(&self) -> usize {
        self.g.len()
    }

    /// The group element `H` used to blind commitments.
    fn blinding_commitment(&self) -> G {
        self.h
    }

    /// Blinds `com` with caller-provided `blinders`: each chunk `<a, G>`
    /// becomes `<a, G> + b·H` for the paired blinder `b`.
    ///
    /// # Errors
    ///
    /// Returns `CommitmentError::BlindersDontMatch` when the number of
    /// blinders differs from the number of commitment chunks.
    fn mask_custom(
        &self,
        com: PolyComm<G>,
        blinders: &PolyComm<G::ScalarField>,
    ) -> Result<BlindedCommitment<G>, CommitmentError> {
        let commitment = com
            .zip(blinders)
            .ok_or_else(|| CommitmentError::BlindersDontMatch(blinders.len(), com.len()))?
            .map(|(g, b)| {
                let mut g_masked = self.h.mul(b);
                g_masked.add_assign(&g);
                g_masked.into_affine()
            });
        Ok(BlindedCommitment {
            commitment,
            blinders: blinders.clone(),
        })
    }

    /// Turns a non-hiding polynomial commitment into a hiding polynomial
    /// commitment. Transforms each given `<a, G>` into `(<a, G> + wH, w)` with
    /// a random `w` per commitment.
    #[cfg(feature = "std")]
    fn mask(
        &self,
        comm: PolyComm<G>,
        rng: &mut (impl RngCore + CryptoRng),
    ) -> BlindedCommitment<G> {
        let blinders = comm.map(|_| G::ScalarField::rand(rng));
        // Blinders are freshly sampled with matching shape, so this can't fail.
        self.mask_custom(comm, &blinders).unwrap()
    }

    /// Commits to `plnm` without hiding, splitting it into commitments of at
    /// most `self.g.len()` coefficients each and padding with the zero point
    /// up to `num_chunks` chunks.
    #[cfg(feature = "std")]
    fn commit_non_hiding(
        &self,
        plnm: &DensePolynomial<G::ScalarField>,
        num_chunks: usize,
    ) -> PolyComm<G> {
        let is_zero = plnm.is_zero();

        // chunk while committing
        let mut chunks: Vec<_> = if is_zero {
            vec![G::zero()]
        } else if plnm.len() < self.g.len() {
            // Single short chunk: one MSM over the needed prefix of `g`.
            vec![G::Group::msm(&self.g[..plnm.len()], &plnm.coeffs)
                .unwrap()
                .into_affine()]
        } else if plnm.len() == self.g.len() {
            // when processing a single chunk, it's faster to parallelise
            // vertically in 2 threads (see the comment to the
            // `benchmark_msm_parallel_vesta` MSM benchmark)
            let n = self.g.len();
            let (r1, r2) = rayon::join(
                || G::Group::msm(&self.g[..n / 2], &plnm.coeffs[..n / 2]).unwrap(),
                || G::Group::msm(&self.g[n / 2..n], &plnm.coeffs[n / 2..n]).unwrap(),
            );

            vec![(r1 + r2).into_affine()]
        } else {
            // otherwise it's better to parallelise horizontally along chunks
            plnm.into_par_iter()
                .chunks(self.g.len())
                .map(|chunk| {
                    let chunk_coeffs = chunk
                        .into_iter()
                        .map(|c| c.into_bigint())
                        .collect::<Vec<_>>();
                    let chunk_res = G::Group::msm_bigint(&self.g, &chunk_coeffs);
                    chunk_res.into_affine()
                })
                .collect()
        };

        // Pad with the identity so the result always has `num_chunks` chunks.
        for _ in chunks.len()..num_chunks {
            chunks.push(G::zero());
        }

        PolyComm::<G>::new(chunks)
    }

    /// Commits to `plnm` with fresh random blinders (hiding commitment).
    #[cfg(feature = "std")]
    fn commit(
        &self,
        plnm: &DensePolynomial<G::ScalarField>,
        num_chunks: usize,
        rng: &mut (impl RngCore + CryptoRng),
    ) -> BlindedCommitment<G> {
        self.mask(self.commit_non_hiding(plnm, num_chunks), rng)
    }

    /// Commits to `plnm` with caller-provided `blinders`.
    ///
    /// # Errors
    ///
    /// Returns `CommitmentError::BlindersDontMatch` when the number of
    /// blinders differs from the number of commitment chunks.
    #[cfg(feature = "std")]
    fn commit_custom(
        &self,
        plnm: &DensePolynomial<G::ScalarField>,
        num_chunks: usize,
        blinders: &PolyComm<G::ScalarField>,
    ) -> Result<BlindedCommitment<G>, CommitmentError> {
        self.mask_custom(self.commit_non_hiding(plnm, num_chunks), blinders)
    }

    /// Commits (non-hiding) to a polynomial given in evaluation form, using
    /// the cached Lagrange-basis commitments for `domain`. When the
    /// evaluation domain is larger than `domain`, the evaluations are
    /// subsampled by the domain-size ratio.
    ///
    /// # Panics
    ///
    /// Panics when `domain` is larger than the polynomial's evaluation domain.
    #[cfg(feature = "std")]
    fn commit_evaluations_non_hiding(
        &self,
        domain: D<G::ScalarField>,
        plnm: &Evaluations<G::ScalarField, D<G::ScalarField>>,
    ) -> PolyComm<G> {
        let basis = &*self.get_lagrange_basis(domain);
        let commit_evaluations = |evals: &Vec<G::ScalarField>, basis: &Vec<PolyComm<G>>| {
            let basis_refs: Vec<_> = basis.iter().collect();
            PolyComm::<G>::multi_scalar_mul(&basis_refs, evals)
        };
        match domain.size.cmp(&plnm.domain().size) {
            core::cmp::Ordering::Less => {
                // Subsample every s-th evaluation to restrict to `domain`.
                #[allow(clippy::cast_possible_truncation)]
                let s = (plnm.domain().size / domain.size) as usize;
                let v: Vec<_> = (0..(domain.size())).map(|i| plnm.evals[s * i]).collect();
                commit_evaluations(&v, basis)
            }
            core::cmp::Ordering::Equal => commit_evaluations(&plnm.evals, basis),
            core::cmp::Ordering::Greater => {
                panic!("desired commitment domain size ({}) greater than evaluations' domain size ({}):", domain.size, plnm.domain().size)
            }
        }
    }

    /// Hiding variant of [`Self::commit_evaluations_non_hiding`] with fresh
    /// random blinders.
    #[cfg(feature = "std")]
    fn commit_evaluations(
        &self,
        domain: D<G::ScalarField>,
        plnm: &Evaluations<G::ScalarField, D<G::ScalarField>>,
        rng: &mut (impl RngCore + CryptoRng),
    ) -> BlindedCommitment<G> {
        self.mask(self.commit_evaluations_non_hiding(domain, plnm), rng)
    }

    /// Variant of [`Self::commit_evaluations_non_hiding`] with
    /// caller-provided `blinders`.
    ///
    /// # Errors
    ///
    /// Returns `CommitmentError::BlindersDontMatch` when the number of
    /// blinders differs from the number of commitment chunks.
    #[cfg(feature = "std")]
    fn commit_evaluations_custom(
        &self,
        domain: D<G::ScalarField>,
        plnm: &Evaluations<G::ScalarField, D<G::ScalarField>>,
        blinders: &PolyComm<G::ScalarField>,
    ) -> Result<BlindedCommitment<G>, CommitmentError> {
        self.mask_custom(self.commit_evaluations_non_hiding(domain, plnm), blinders)
    }

    /// Deterministically creates an SRS of size `depth`, deriving each basis
    /// point from the hash of its index (sequential counterpart of
    /// `create_parallel`).
    #[cfg(feature = "std")]
    fn create(depth: usize) -> Self {
        let m = G::Map::setup();

        let g: Vec<_> = (0..depth)
            .map(|i| {
                let mut h = Blake2b512::new();
                #[allow(clippy::cast_possible_truncation)]
                h.update((i as u32).to_be_bytes());
                point_of_random_bytes(&m, &h.finalize())
            })
            .collect();

        // Compute a blinder
        let h = {
            let mut h = Blake2b512::new();
            h.update(b"srs_misc");
            // FIXME: This is for retrocompatibility with a previous version
            // that was using a list initialisation. It is not necessary.
            h.update(0_u32.to_be_bytes());
            point_of_random_bytes(&m, &h.finalize())
        };

        Self {
            g,
            h,
            lagrange_bases: HashMapCache::new(),
        }
    }

    /// Returns the Lagrange-basis commitments for the given domain size,
    /// computing and caching them on first use.
    fn get_lagrange_basis_from_domain_size(
        &self,
        domain_size: usize,
    ) -> impl Deref<Target = Vec<PolyComm<G>>> + '_ {
        self.lagrange_bases.get_or_generate(domain_size, move || {
            self.lagrange_basis(D::new(domain_size).unwrap())
        })
    }

    /// Returns the Lagrange-basis commitments for `domain`, computing and
    /// caching them on first use.
    fn get_lagrange_basis(
        &self,
        domain: D<G::ScalarField>,
    ) -> impl Deref<Target = Vec<PolyComm<G>>> + '_ {
        self.lagrange_bases
            .get_or_generate(domain.size(), move || self.lagrange_basis(domain))
    }

    /// The number of commitment bases, i.e. the SRS size.
    #[cfg(feature = "std")]
    fn size(&self) -> usize {
        self.g.len()
    }
}
802
803#[cfg(feature = "std")]
804impl<G: CommitmentCurve> SRS<G> {
805    /// Creates an opening proof for a batch of polynomial commitments.
806    ///
807    /// This function implements the IPA (Inner Product Argument) prover. During the
808    /// `k = log_2(n)` rounds of folding, it implicitly constructs the **challenge
809    /// polynomial** `b(X)`.
810    ///
811    /// Note: The use of the challenge polynomial `b(X)` is a modification to the
812    /// original IPA protocol that improves efficiency in recursive proof settings. The challenge
    /// polynomial is inspired from the "Amortization strategy" from [Recursive Proof
814    /// Composition without a Trusted Setup](https://eprint.iacr.org/2019/1021.pdf), section 3.2.
815    ///
816    /// # Panics
817    ///
818    /// Panics if IPA folding does not produce single elements after log rounds,
819    /// or if the challenge inverse cannot be computed.
820    #[allow(clippy::type_complexity)]
821    #[allow(clippy::many_single_char_names)]
822    #[allow(clippy::too_many_lines)]
    pub fn open<EFqSponge, RNG, D: EvaluationDomain<G::ScalarField>, const FULL_ROUNDS: usize>(
        &self,
        group_map: &G::Map,
        plnms: PolynomialsToCombine<G, D>,
        elm: &[G::ScalarField],
        polyscale: G::ScalarField,
        evalscale: G::ScalarField,
        mut sponge: EFqSponge,
        rng: &mut RNG,
    ) -> OpeningProof<G, FULL_ROUNDS>
    where
        EFqSponge: Clone + FqSponge<G::BaseField, G, G::ScalarField, FULL_ROUNDS>,
        RNG: RngCore + CryptoRng,
        G::BaseField: PrimeField,
        G: EndoCurve,
    {
        // Endomorphism coefficients used to map prechallenges into full
        // scalar challenges (see `to_field` / `combine_one_endo` below).
        let (endo_q, endo_r) = endos::<G>();

        let rounds = math::ceil_log2(self.g.len());
        let padded_length = 1 << rounds;

        // TODO: Trim this to the degree of the largest polynomial
        // TODO: We do always suppose we have a power of 2 for the SRS in
        // practice. Therefore, padding equals zero, and this code can be
        // removed. Only a current test case uses a SRS with a non-power of 2.
        let padding = padded_length - self.g.len();
        let mut g = self.g.clone();
        g.extend(vec![G::zero(); padding]);

        // Combines polynomials roughly as follows: p(X) := ∑_i polyscale^i p_i(X)
        //
        // `blinding_factor` is a combined set of commitments that are
        // paired with polynomials in `plnms`. In kimchi, these input
        // commitments are poly com blinders, so often `[G::ScalarField::one();
        // num_chunks]` or zeroes.
        let (p, blinding_factor) = combine_polys::<G, D>(plnms, polyscale, self.g.len());

        // The initial evaluation vector for polynomial commitment b_init is not
        // just the powers of a single point as in the original IPA
        // (1,ζ,ζ^2,...)
        //
        // but rather a vector of linearly combined powers with `evalscale` as
        // recombiner.
        //
        // b_init[j] = Σ_i evalscale^i elm_i^j
        //           = ζ^j + evalscale * ζ^j ω^j (in the specific case of challenges (ζ,ζω))
        //
        // So in our case b_init is the following vector:
        //    1 + evalscale
        //    ζ + evalscale * ζ ω
        //    ζ^2 + evalscale * (ζ ω)^2
        //    ζ^3 + evalscale * (ζ ω)^3
        //    ...
        let b_init = {
            // randomise/scale the eval powers
            let mut scale = G::ScalarField::one();
            let mut res: Vec<G::ScalarField> =
                (0..padded_length).map(|_| G::ScalarField::zero()).collect();
            for e in elm {
                for (i, t) in pows(padded_length, *e).iter().enumerate() {
                    res[i] += &(scale * t);
                }
                scale *= &evalscale;
            }
            res
        };

        // Combined polynomial p(X) evaluated at the combined eval point b_init,
        // computed as the inner product <p.coeffs, b_init>.
        let combined_inner_product = p
            .coeffs
            .iter()
            .zip(b_init.iter())
            .map(|(a, b)| *a * b)
            .fold(G::ScalarField::zero(), |acc, x| acc + x);

        // Usually, the prover sends `combined_inner_product` to the verifier,
        // so we should absorb `combined_inner_product`.
        // However it is more efficient in the recursion circuit
        // to absorb a slightly modified version of it.
        // As a reminder, in a recursive setting, the challenges are given as a
        // public input and verified in the next iteration.
        // See the `shift_scalar` doc.
        sponge.absorb_fr(&[shift_scalar::<G>(combined_inner_product)]);

        // Generate another randomisation base U; our commitments will be w.r.t
        // bases {G_i},H,U.
        let u_base: G = {
            let t = sponge.challenge_fq();
            let (x, y) = group_map.to_group(t);
            G::of_coordinates(x, y)
        };

        // Working copy of the combined polynomial's coefficients, zero-padded
        // to the power-of-two length required by the folding rounds.
        let mut a = p.coeffs;
        assert!(padded_length >= a.len());
        a.extend(vec![G::ScalarField::zero(); padded_length - a.len()]);

        let mut b = b_init;

        // Per-round (L, R) cross-commitments, sent in the proof.
        let mut lr = vec![];

        // Per-round (rand_l, rand_r) blinders, folded into r_prime at the end.
        let mut blinders = vec![];

        let mut chals = vec![];
        let mut chal_invs = vec![];

        // The main IPA folding loop that has log iterations.
        for _ in 0..rounds {
            let n = g.len() / 2;
            // Pedersen bases
            let (g_lo, g_hi) = (&g[0..n], &g[n..]);
            // Polynomial coefficients
            let (a_lo, a_hi) = (&a[0..n], &a[n..]);
            // Evaluation points
            let (b_lo, b_hi) = (&b[0..n], &b[n..]);

            // Blinders for L/R
            let rand_l = <G::ScalarField as UniformRand>::rand(rng);
            let rand_r = <G::ScalarField as UniformRand>::rand(rng);

            // L: Pedersen commitment to a_hi over the bases g_lo, blinded by
            // rand_l on H, with the cross inner product <a_hi, b_lo> on U.
            let l = G::Group::msm_bigint(
                &[g_lo, &[self.h, u_base]].concat(),
                &[a_hi, &[rand_l, inner_prod(a_hi, b_lo)]]
                    .concat()
                    .iter()
                    .map(|x| x.into_bigint())
                    .collect::<Vec<_>>(),
            )
            .into_affine();

            // R: symmetric to L — a_lo over g_hi, blinded by rand_r on H, with
            // the cross inner product <a_lo, b_hi> on U.
            let r = G::Group::msm_bigint(
                &[g_hi, &[self.h, u_base]].concat(),
                &[a_lo, &[rand_r, inner_prod(a_lo, b_hi)]]
                    .concat()
                    .iter()
                    .map(|x| x.into_bigint())
                    .collect::<Vec<_>>(),
            )
            .into_affine();

            lr.push((l, r));
            blinders.push((rand_l, rand_r));

            sponge.absorb_g(&[l]);
            sponge.absorb_g(&[r]);

            // Round #i challenges;
            // - not to be confused with "u_base"
            // - not to be confused with "u" as "polyscale"
            let u_pre = squeeze_prechallenge(&mut sponge);
            let u = u_pre.to_field(&endo_r);
            // Challenges are sponge outputs mapped through the endomorphism;
            // a zero challenge would make this panic (see `# Panics`).
            let u_inv = u.inverse().unwrap();

            chals.push(u);
            chal_invs.push(u_inv);

            // IPA-folding polynomial coefficients
            a = o1_utils::cfg_iter!(a_hi)
                .zip(a_lo)
                .map(|(&hi, &lo)| {
                    // lo + u_inv * hi
                    let mut res = hi;
                    res *= u_inv;
                    res += &lo;
                    res
                })
                .collect();

            // IPA-folding evaluation points.
            // This folding implicitly constructs the challenge polynomial b(X):
            // after all rounds, b[0] = b_poly(chals, evaluation_point).
            b = o1_utils::cfg_iter!(b_lo)
                .zip(b_hi)
                .map(|(&lo, &hi)| {
                    // lo + u * hi
                    let mut res = hi;
                    res *= u;
                    res += &lo;
                    res
                })
                .collect();

            // IPA-folding bases
            g = G::combine_one_endo(endo_r, endo_q, g_lo, g_hi, &u_pre);
        }

        assert!(
            g.len() == 1 && a.len() == 1 && b.len() == 1,
            "IPA commitment folding must produce single elements after log rounds"
        );
        let a0 = a[0];
        // b0 is the folded evaluation point, equal to b_poly(chals, evaluation_point).
        // Note: The folding of `b` (and this extraction) could be skipped in a
        // non-recursive setting where the verifier doesn't need to recompute
        // the evaluation from challenges using b_poly.
        let b0 = b[0];
        let g0 = g[0];

        // Compute r_prime, a folded blinder. It combines blinders on
        // each individual step of the IPA folding process together
        // with the final blinding_factor of the polynomial.
        //
        // r_prime := ∑_i (rand_l[i] * u[i]^{-1} + rand_r[i] * u[i])
        //          + blinding_factor
        //
        // where u is a vector of folding challenges, and rand_l/rand_r are
        // intermediate L/R blinders.
        let r_prime = blinders
            .iter()
            .zip(chals.iter().zip(chal_invs.iter()))
            .map(|((rand_l, rand_r), (u, u_inv))| ((*rand_l) * u_inv) + (*rand_r * u))
            .fold(blinding_factor, |acc, x| acc + x);

        // Fresh randomness for the final Schnorr-style argument.
        let d = <G::ScalarField as UniformRand>::rand(rng);
        let r_delta = <G::ScalarField as UniformRand>::rand(rng);

        // Compute delta, the commitment
        // delta = [d] G0 + \
        //         [b0*d] U_base + \
        //         [r_delta] H^r (as a group element, in additive notation)
        let delta = ((g0.into_group() + (u_base.mul(b0))).into_affine().mul(d)
            + self.h.mul(r_delta))
        .into_affine();

        sponge.absorb_g(&[delta]);
        let c = ScalarChallenge::new(sponge.challenge()).to_field(&endo_r);

        // Schnorr-like responses showing the knowledge of r_prime and a0.
        let z1 = a0 * c + d;
        let z2 = r_prime * c + r_delta;

        OpeningProof {
            delta,
            lr,
            z1,
            z2,
            sg: g0,
        }
    }
1062}
1063
impl<G: CommitmentCurve> SRS<G> {
    /// Computes (possibly chunked) commitments to the normalized Lagrange
    /// basis polynomials of `domain`, by applying an IFFT to the SRS points.
    /// See the derivation in the comment below.
    fn lagrange_basis(&self, domain: D<G::ScalarField>) -> Vec<PolyComm<G>> {
        let n = domain.size();

        // Let V be a vector space over the field F.
        //
        // Given
        // - a domain [ 1, w, w^2, ..., w^{n - 1} ]
        // - a vector v := [ v_0, ..., v_{n - 1} ] in V^n
        //
        // the FFT algorithm computes the matrix application
        //
        // u = M(w) * v
        //
        // where
        // M(w) =
        //   1 1       1           ... 1
        //   1 w       w^2         ... w^{n-1}
        //   ...
        //   1 w^{n-1} (w^2)^{n-1} ... (w^{n-1})^{n-1}
        //
        // The IFFT algorithm computes
        //
        // v = M(w)^{-1} * u
        //
        // Let's see how we can use this algorithm to compute the lagrange basis
        // commitments.
        //
        // Let V be the vector space F[x] of polynomials in x over F.
        // Let v in V be the vector [ L_0, ..., L_{n - 1} ] where L_i is the i^{th}
        // normalized Lagrange polynomial (where L_i(w^j) = j == i ? 1 : 0).
        //
        // Consider the rows of M(w) * v. Let me write out the matrix and vector
        // so you can see more easily.
        //
        //   | 1 1       1           ... 1               |   | L_0     |
        //   | 1 w       w^2         ... w^{n-1}         | * | L_1     |
        //   | ...                                       |   | ...     |
        //   | 1 w^{n-1} (w^2)^{n-1} ... (w^{n-1})^{n-1} |   | L_{n-1} |
        //
        // The 0th row is L_0 + L1 + ... + L_{n - 1}. So, it's the polynomial
        // that has the value 1 on every element of the domain.
        // In other words, it's the polynomial 1.
        //
        // The 1st row is L_0 + w L_1 + ... + w^{n - 1} L_{n - 1}. So, it's the
        // polynomial which has value w^i on w^i.
        // In other words, it's the polynomial x.
        //
        // In general, you can see that row i is in fact the polynomial x^i.
        //
        // Thus, M(w) * v is the vector u, where u = [ 1, x, x^2, ..., x^{n-1} ]
        //
        // Therefore, the IFFT algorithm, when applied to the vector u (the
        // standard monomial basis) will yield the vector v of the (normalized)
        // Lagrange polynomials.
        //
        // Now, because the polynomial commitment scheme is additively
        // homomorphic, and because the commitment to the polynomial x^i is just
        // self.g[i], we can obtain commitments to the normalized Lagrange
        // polynomials by applying IFFT to the vector self.g[0..n].
        //
        //
        // Further still, we can do the same trick for 'chunked' polynomials.
        //
        // Recall that a chunked polynomial is some f of degree k*n - 1 with
        // f(x) = f_0(x) + x^n f_1(x) + ... + x^{(k-1) n} f_{k-1}(x)
        // where each f_i has degree n-1.
        //
        // In the above, if we set u = [ 1, x, x^2, ..., x^{n-1}, 0, 0, .., 0 ]
        // then we effectively 'zero out' any polynomial terms higher than
        // x^{n-1}, leaving us with the 'partial Lagrange polynomials' that
        // contribute to f_0.
        //
        // Similarly, u = [ 0, 0, ..., 0, 1, x, x^2, ..., x^{n-1}, 0, 0, ..., 0]
        // with n leading zeros 'zeroes out' all terms except the 'partial
        // Lagrange polynomials' that contribute to f_1, and likewise for each
        // f_i.
        //
        // By computing each of these, and recollecting the terms as a vector of
        // polynomial commitments, we obtain a chunked commitment to the L_i
        // polynomials.
        let srs_size = self.g.len();
        // Number of chunks k needed to cover a domain larger than the SRS.
        let num_elems = n.div_ceil(srs_size);
        let mut chunks = Vec::with_capacity(num_elems);

        // For each chunk
        for i in 0..num_elems {
            // Initialize the vector with zero curve points
            let mut lg: Vec<<G as AffineRepr>::Group> = vec![<G as AffineRepr>::Group::zero(); n];
            // Overwrite the terms corresponding to that chunk with the SRS
            // curve points
            let start_offset = i * srs_size;
            // The last chunk may be partial when srs_size does not divide n.
            let num_terms = min((i + 1) * srs_size, n) - start_offset;
            for j in 0..num_terms {
                lg[start_offset + j] = self.g[j].into_group();
            }
            // Apply the IFFT
            domain.ifft_in_place(&mut lg);
            // Append the 'partial Lagrange polynomials' to the vector of elems
            // chunks, converting back to affine points in a single batch.
            chunks.push(<G as AffineRepr>::Group::normalize_batch(lg.as_mut_slice()));
        }

        // Transpose: gather the i-th commitment of every chunk into one
        // chunked PolyComm per Lagrange polynomial L_i.
        (0..n)
            .map(|i| PolyComm {
                chunks: chunks.iter().map(|v| v[i]).collect(),
            })
            .collect()
    }
}
1174
#[serde_as]
#[derive(Clone, Debug, Serialize, Deserialize, Default, PartialEq, Eq)]
#[serde(bound = "G: ark_serialize::CanonicalDeserialize + ark_serialize::CanonicalSerialize")]
pub struct OpeningProof<G: AffineRepr, const FULL_ROUNDS: usize> {
    /// Vector of rounds of L & R commitments
    #[serde_as(as = "Vec<(o1_utils::serialization::SerdeAs, o1_utils::serialization::SerdeAs)>")]
    pub lr: Vec<(G, G)>,
    /// Commitment to the fresh randomness (d, r_delta) used in the final
    /// Schnorr-like argument: delta = [d] G0 + [b0*d] U + [r_delta] H.
    #[serde_as(as = "o1_utils::serialization::SerdeAs")]
    pub delta: G,
    /// Schnorr-like response for the folded coefficient: z1 = a0 * c + d.
    #[serde_as(as = "o1_utils::serialization::SerdeAs")]
    pub z1: G::ScalarField,
    /// Schnorr-like response for the folded blinder: z2 = r_prime * c + r_delta.
    #[serde_as(as = "o1_utils::serialization::SerdeAs")]
    pub z2: G::ScalarField,
    /// A final folded commitment base
    #[serde_as(as = "o1_utils::serialization::SerdeAs")]
    pub sg: G,
}
1192
// Wires the IPA scheme into the generic `OpenProof` abstraction by
// delegating to the `SRS` methods.
impl<
        BaseField: PrimeField,
        G: AffineRepr<BaseField = BaseField> + CommitmentCurve + EndoCurve,
        const FULL_ROUNDS: usize,
    > crate::OpenProof<G, FULL_ROUNDS> for OpeningProof<G, FULL_ROUNDS>
{
    type SRS = SRS<G>;

    /// Produces an IPA opening proof; thin wrapper around [`SRS::open`].
    #[cfg(feature = "std")]
    fn open<EFqSponge, RNG, D: EvaluationDomain<<G as AffineRepr>::ScalarField>>(
        srs: &Self::SRS,
        group_map: &<G as CommitmentCurve>::Map,
        plnms: PolynomialsToCombine<G, D>,
        elm: &[<G as AffineRepr>::ScalarField],
        polyscale: <G as AffineRepr>::ScalarField,
        evalscale: <G as AffineRepr>::ScalarField,
        sponge: EFqSponge,
        rng: &mut RNG,
    ) -> Self
    where
        EFqSponge: Clone
            + FqSponge<<G as AffineRepr>::BaseField, G, <G as AffineRepr>::ScalarField, FULL_ROUNDS>,
        RNG: RngCore + CryptoRng,
    {
        srs.open(group_map, plnms, elm, polyscale, evalscale, sponge, rng)
    }

    /// Batch-verifies IPA opening proofs; thin wrapper around the SRS
    /// `verify` method.
    fn verify<EFqSponge, RNG>(
        srs: &Self::SRS,
        group_map: &G::Map,
        batch: &mut [BatchEvaluationProof<G, EFqSponge, Self, FULL_ROUNDS>],
        rng: &mut RNG,
    ) -> bool
    where
        EFqSponge:
            FqSponge<<G as AffineRepr>::BaseField, G, <G as AffineRepr>::ScalarField, FULL_ROUNDS>,
        RNG: RngCore + CryptoRng,
    {
        srs.verify(group_map, batch, rng)
    }
}
1234
/// Commitment round challenges (endo mapped) and their inverses.
pub struct Challenges<F> {
    /// Endo-mapped folding challenges, one per IPA round.
    pub chal: Vec<F>,
    /// Multiplicative inverses of `chal`, in the same order.
    pub chal_inv: Vec<F>,
}
1240
1241impl<G: AffineRepr, const FULL_ROUNDS: usize> OpeningProof<G, FULL_ROUNDS> {
1242    /// Computes a log-sized vector of scalar challenges for
1243    /// recombining elements inside the IPA.
1244    pub fn prechallenges<EFqSponge: FqSponge<G::BaseField, G, G::ScalarField, FULL_ROUNDS>>(
1245        &self,
1246        sponge: &mut EFqSponge,
1247    ) -> Vec<ScalarChallenge<G::ScalarField>> {
1248        let _t = sponge.challenge_fq();
1249        self.lr
1250            .iter()
1251            .map(|(l, r)| {
1252                sponge.absorb_g(&[*l]);
1253                sponge.absorb_g(&[*r]);
1254                squeeze_prechallenge(sponge)
1255            })
1256            .collect()
1257    }
1258
1259    /// Same as `prechallenges`, but maps scalar challenges using the provided
1260    /// endomorphism, and computes their inverses.
1261    pub fn challenges<EFqSponge: FqSponge<G::BaseField, G, G::ScalarField, FULL_ROUNDS>>(
1262        &self,
1263        endo_r: &G::ScalarField,
1264        sponge: &mut EFqSponge,
1265    ) -> Challenges<G::ScalarField> {
1266        let chal: Vec<_> = self
1267            .lr
1268            .iter()
1269            .map(|(l, r)| {
1270                sponge.absorb_g(&[*l]);
1271                sponge.absorb_g(&[*r]);
1272                squeeze_challenge(endo_r, sponge)
1273            })
1274            .collect();
1275
1276        let chal_inv = {
1277            let mut cs = chal.clone();
1278            ark_ff::batch_inversion(&mut cs);
1279            cs
1280        };
1281
1282        Challenges { chal, chal_inv }
1283    }
1284}
1285
#[cfg(feature = "ocaml_types")]
#[allow(non_local_definitions)]
pub mod caml {
    //! OCaml FFI mirror of [`OpeningProof`] and the conversions to/from it.

    use super::OpeningProof;
    use ark_ec::AffineRepr;
    use ocaml;

    #[derive(ocaml::IntoValue, ocaml::FromValue, ocaml_gen::Struct)]
    pub struct CamlOpeningProof<G, F> {
        /// vector of rounds of L & R commitments
        pub lr: Vec<(G, G)>,
        pub delta: G,
        pub z1: F,
        pub z2: F,
        pub sg: G,
    }

    impl<G, CamlF, CamlG, const FULL_ROUNDS: usize> From<OpeningProof<G, FULL_ROUNDS>>
        for CamlOpeningProof<CamlG, CamlF>
    where
        G: AffineRepr,
        CamlG: From<G>,
        CamlF: From<G::ScalarField>,
    {
        // Converts a native proof into its OCaml representation by mapping
        // every field through the corresponding Caml conversion.
        fn from(opening_proof: OpeningProof<G, FULL_ROUNDS>) -> Self {
            let OpeningProof {
                lr,
                delta,
                z1,
                z2,
                sg,
            } = opening_proof;
            let lr = lr
                .into_iter()
                .map(|(left, right)| (CamlG::from(left), CamlG::from(right)))
                .collect();
            Self {
                lr,
                delta: CamlG::from(delta),
                z1: CamlF::from(z1),
                z2: CamlF::from(z2),
                sg: CamlG::from(sg),
            }
        }
    }

    impl<G, CamlF, CamlG, const FULL_ROUNDS: usize> From<CamlOpeningProof<CamlG, CamlF>>
        for OpeningProof<G, FULL_ROUNDS>
    where
        G: AffineRepr,
        CamlG: Into<G>,
        CamlF: Into<G::ScalarField>,
    {
        // Converts an OCaml proof back into the native representation.
        fn from(caml: CamlOpeningProof<CamlG, CamlF>) -> Self {
            let mut lr = Vec::with_capacity(caml.lr.len());
            for (left, right) in caml.lr {
                lr.push((left.into(), right.into()));
            }
            Self {
                lr,
                delta: caml.delta.into(),
                z1: caml.z1.into(),
                z2: caml.z2.into(),
                sg: caml.sg.into(),
            }
        }
    }
}