poly_commitment/
ipa.rs

//! This module contains the implementation of the polynomial commitment scheme
//! called the Inner Product Argument (IPA) as described in [Efficient
//! Zero-Knowledge Arguments for Arithmetic Circuits in the Discrete Log
//! Setting](https://eprint.iacr.org/2016/263).
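//!
//! A minimal usage sketch (not compiled as a doctest). It assumes the Vesta
//! curve and its scalar field `Fp` from `mina_curves`, the `DenseUVPolynomial`
//! trait from `ark_poly`, and `OsRng` from the `rand` crate; adjust the paths
//! to your setup:
//!
//! ```ignore
//! use ark_poly::{univariate::DensePolynomial, DenseUVPolynomial};
//! use mina_curves::pasta::{Fp, Vesta};
//! use poly_commitment::{ipa::SRS, SRS as _};
//!
//! // A deterministic SRS with room for 16 coefficients per chunk.
//! let srs = SRS::<Vesta>::create(16);
//!
//! // Commit to p(X) = 1 + 2X + 3X^2 in a single chunk, with a random blinder.
//! let p = DensePolynomial::from_coefficients_vec(vec![
//!     Fp::from(1u64),
//!     Fp::from(2u64),
//!     Fp::from(3u64),
//! ]);
//! let blinded = srs.commit(&p, 1, &mut rand::rngs::OsRng);
//! ```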

use crate::{
    commitment::{
        b_poly, b_poly_coefficients, combine_commitments, shift_scalar, squeeze_challenge,
        squeeze_prechallenge, BatchEvaluationProof, CommitmentCurve, EndoCurve,
    },
    error::CommitmentError,
    hash_map_cache::HashMapCache,
    utils::combine_polys,
    BlindedCommitment, PolyComm, PolynomialsToCombine, SRS as SRSTrait,
};
use ark_ec::{AffineRepr, CurveGroup, VariableBaseMSM};
use ark_ff::{BigInteger, Field, One, PrimeField, UniformRand, Zero};
use ark_poly::{
    univariate::DensePolynomial, EvaluationDomain, Evaluations, Radix2EvaluationDomain as D,
};
use ark_serialize::{CanonicalDeserialize, CanonicalSerialize};
use blake2::{Blake2b512, Digest};
use groupmap::GroupMap;
use mina_poseidon::{sponge::ScalarChallenge, FqSponge};
use o1_utils::{
    field_helpers::{inner_prod, pows},
    math,
};
use rand::{CryptoRng, RngCore};
use rayon::prelude::*;
use serde::{Deserialize, Serialize};
use serde_with::serde_as;
use std::{cmp::min, iter::Iterator, ops::AddAssign};

#[serde_as]
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
#[serde(bound = "G: CanonicalDeserialize + CanonicalSerialize")]
pub struct SRS<G> {
    /// The vector of group elements for committing to polynomials in
    /// coefficient form.
    #[serde_as(as = "Vec<o1_utils::serialization::SerdeAs>")]
    pub g: Vec<G>,

    /// A group element used for blinding commitments.
    #[serde_as(as = "o1_utils::serialization::SerdeAs")]
    pub h: G,

    // TODO: the following field should be separated out, as it is an
    // optimization value.
    /// Commitments to the Lagrange basis, per domain size.
    #[serde(skip)]
    pub lagrange_bases: HashMapCache<usize, Vec<PolyComm<G>>>,
}

impl<G> PartialEq for SRS<G>
where
    G: PartialEq,
{
    fn eq(&self, other: &Self) -> bool {
        self.g == other.g && self.h == other.h
    }
}

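/// Computes the curve's endomorphism coefficients `(endo_q, endo_r)`: `endo_q`
/// lives in the base field and defines the map `(x, y) -> (endo_q * x, y)`,
/// while `endo_r` is the matching scalar such that applying that map to a
/// point `P` equals `[endo_r] P`.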
pub fn endos<G: CommitmentCurve>() -> (G::BaseField, G::ScalarField)
where
    G::BaseField: PrimeField,
{
    let endo_q: G::BaseField = mina_poseidon::sponge::endo_coefficient();
    let endo_r = {
        let potential_endo_r: G::ScalarField = mina_poseidon::sponge::endo_coefficient();
        let t = G::generator();
        let (x, y) = t.to_coordinates().unwrap();
        let phi_t = G::of_coordinates(x * endo_q, y);
        if t.mul(potential_endo_r) == phi_t.into_group() {
            potential_endo_r
        } else {
            potential_endo_r * potential_endo_r
        }
    };
    (endo_q, endo_r)
}

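/// Deterministically maps a byte string to a curve point: the bytes are
/// unpacked into base-prime-field elements (31 bytes per element), combined
/// into a base-field element, sent through the group map, and finally
/// multiplied by the cofactor.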
fn point_of_random_bytes<G: CommitmentCurve>(map: &G::Map, random_bytes: &[u8]) -> G
where
    G::BaseField: Field,
{
    // packing in bit-representation
    const N: usize = 31;
    let extension_degree = G::BaseField::extension_degree() as usize;

    let mut base_fields = Vec::with_capacity(N * extension_degree);

    for base_count in 0..extension_degree {
        let mut bits = [false; 8 * N];
        let offset = base_count * N;
        for i in 0..N {
            for j in 0..8 {
                bits[8 * i + j] = (random_bytes[offset + i] >> j) & 1 == 1;
            }
        }

        let n =
            <<G::BaseField as Field>::BasePrimeField as PrimeField>::BigInt::from_bits_be(&bits);
        let t = <<G::BaseField as Field>::BasePrimeField as PrimeField>::from_bigint(n)
            .expect("packing code has a bug");
        base_fields.push(t)
    }

    let t = G::BaseField::from_base_prime_field_elems(base_fields).unwrap();

    let (x, y) = map.to_group(t);
    G::of_coordinates(x, y).mul_by_cofactor()
}

/// Additional methods for the SRS structure
impl<G: CommitmentCurve> SRS<G> {
    /// This function verifies a batch of polynomial commitment opening proofs.
    /// Returns `true` if the verification is successful, `false` otherwise.
    pub fn verify<EFqSponge, RNG>(
        &self,
        group_map: &G::Map,
        batch: &mut [BatchEvaluationProof<G, EFqSponge, OpeningProof<G>>],
        rng: &mut RNG,
    ) -> bool
    where
        EFqSponge: FqSponge<G::BaseField, G, G::ScalarField>,
        RNG: RngCore + CryptoRng,
        G::BaseField: PrimeField,
    {
        // Verifier checks for all i,
        // c_i Q_i + delta_i = z1_i (G_i + b_i U_i) + z2_i H
        //
        // if we sample evalscale at random, it suffices to check
        //
        // 0 == sum_i evalscale^i (c_i Q_i + delta_i - ( z1_i (G_i + b_i U_i) + z2_i H ))
        //
        // and because each G_i is a multiexp on the same array self.g, we
        // can batch the multiexp across proofs.
        //
        // So for each proof in the batch, we add onto our big multiexp the
        // following terms
        // evalscale^i c_i Q_i
        // evalscale^i delta_i
        // - (evalscale^i z1_i) G_i
        // - (evalscale^i z2_i) H
        // - (evalscale^i z1_i b_i) U_i

        // We also check that the sg component of the proof is equal to the
        // polynomial commitment to the "s" array
        let nonzero_length = self.g.len();

        let max_rounds = math::ceil_log2(nonzero_length);

        let padded_length = 1 << max_rounds;

        let (_, endo_r) = endos::<G>();

        // TODO: This will need adjusting
        let padding = padded_length - nonzero_length;
        let mut points = vec![self.h];
        points.extend(self.g.clone());
        points.extend(vec![G::zero(); padding]);

        let mut scalars = vec![G::ScalarField::zero(); padded_length + 1];
        assert_eq!(scalars.len(), points.len());

        // sample randomisers to scale the proofs with
        let rand_base = G::ScalarField::rand(rng);
        let sg_rand_base = G::ScalarField::rand(rng);

        let mut rand_base_i = G::ScalarField::one();
        let mut sg_rand_base_i = G::ScalarField::one();

        for BatchEvaluationProof {
            sponge,
            evaluation_points,
            polyscale,
            evalscale,
            evaluations,
            opening,
            combined_inner_product,
        } in batch.iter_mut()
        {
            sponge.absorb_fr(&[shift_scalar::<G>(*combined_inner_product)]);

            let u_base: G = {
                let t = sponge.challenge_fq();
                let (x, y) = group_map.to_group(t);
                G::of_coordinates(x, y)
            };

            let Challenges { chal, chal_inv } = opening.challenges::<EFqSponge>(&endo_r, sponge);

            sponge.absorb_g(&[opening.delta]);
            let c = ScalarChallenge(sponge.challenge()).to_field(&endo_r);

            // < s, sum_i evalscale^i pows(evaluation_point[i]) >
            // ==
            // sum_i evalscale^i < s, pows(evaluation_point[i]) >
            let b0 = {
                let mut scale = G::ScalarField::one();
                let mut res = G::ScalarField::zero();
                for &e in evaluation_points.iter() {
                    let term = b_poly(&chal, e);
                    res += &(scale * term);
                    scale *= *evalscale;
                }
                res
            };

            let s = b_poly_coefficients(&chal);

            let neg_rand_base_i = -rand_base_i;

            // TERM
            // - rand_base_i z1 G
            //
            // we also add -sg_rand_base_i * G to check correctness of sg.
            points.push(opening.sg);
            scalars.push(neg_rand_base_i * opening.z1 - sg_rand_base_i);

            // Here we add
            // sg_rand_base_i * ( < s, self.g > )
            // =
            // < sg_rand_base_i s, self.g >
            //
            // to check correctness of the sg component.
            {
                let terms: Vec<_> = s.par_iter().map(|s| sg_rand_base_i * s).collect();

                for (i, term) in terms.iter().enumerate() {
                    scalars[i + 1] += term;
                }
            }

            // TERM
            // - rand_base_i * z2 * H
            scalars[0] -= &(rand_base_i * opening.z2);

            // TERM
            // - rand_base_i * (z1 * b0 * U)
            scalars.push(neg_rand_base_i * (opening.z1 * b0));
            points.push(u_base);

            // TERM
            // rand_base_i c_i Q_i
            // = rand_base_i c_i
            //   (sum_j (chal_invs[j] L_j + chals[j] R_j) + P_prime)
            // where P_prime = combined commitment + combined_inner_product * U
            let rand_base_i_c_i = c * rand_base_i;
            for ((l, r), (u_inv, u)) in opening.lr.iter().zip(chal_inv.iter().zip(chal.iter())) {
                points.push(*l);
                scalars.push(rand_base_i_c_i * u_inv);

                points.push(*r);
                scalars.push(rand_base_i_c_i * u);
            }

            // TERM
            // sum_j evalscale^j (sum_i polyscale^i f_i) (elm_j)
            // == sum_j sum_i evalscale^j polyscale^i f_i(elm_j)
            // == sum_i polyscale^i sum_j evalscale^j f_i(elm_j)
            combine_commitments(
                evaluations,
                &mut scalars,
                &mut points,
                *polyscale,
                rand_base_i_c_i,
            );

            scalars.push(rand_base_i_c_i * *combined_inner_product);
            points.push(u_base);

            scalars.push(rand_base_i);
            points.push(opening.delta);

            rand_base_i *= &rand_base;
            sg_rand_base_i *= &sg_rand_base;
        }

        // Verify the equation in two chunks, which is optimal for our SRS size.
        // (see the comment to the `benchmark_msm_parallel_vesta` MSM benchmark)
        let chunk_size = points.len() / 2;
        let msm_res = points
            .into_par_iter()
            .chunks(chunk_size)
            .zip(scalars.into_par_iter().chunks(chunk_size))
            .map(|(bases, coeffs)| {
                let coeffs_bigint = coeffs
                    .into_iter()
                    .map(|c| c.into_bigint())
                    .collect::<Vec<_>>();
                G::Group::msm_bigint(&bases, &coeffs_bigint)
            })
            .reduce(G::Group::zero, |mut l, r| {
                l += r;
                l
            });

        msm_res == G::Group::zero()
    }

    /// This function creates a trusted-setup SRS instance for circuits with a
    /// number of rows up to `depth`.
    ///
    /// # Safety
    ///
    /// This function is unsafe because it creates a trusted setup, and the
    /// toxic waste is passed in as a parameter.
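    ///
    /// A hedged usage sketch (test-only), assuming the Vesta curve and its
    /// scalar field `Fp` from `mina_curves`:
    ///
    /// ```ignore
    /// use mina_curves::pasta::{Fp, Vesta};
    ///
    /// // `x` is the toxic waste: anyone who knows it can break the binding of
    /// // commitments made with this SRS.
    /// let x = Fp::from(42u64);
    /// let srs = unsafe { SRS::<Vesta>::create_trusted_setup(x, 8) };
    /// assert_eq!(srs.g.len(), 8);
    /// ```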
    pub unsafe fn create_trusted_setup(x: G::ScalarField, depth: usize) -> Self {
        let m = G::Map::setup();

        let mut x_pow = G::ScalarField::one();
        let g: Vec<_> = (0..depth)
            .map(|_| {
                let res = G::generator().mul(x_pow);
                x_pow *= x;
                res.into_affine()
            })
            .collect();

        // Compute a blinder
        let h = {
            let mut h = Blake2b512::new();
            h.update("srs_misc".as_bytes());
            // FIXME: This is kept for backward compatibility with a previous
            // version that used a list initialisation. It is not necessary.
            h.update(0_u32.to_be_bytes());
            point_of_random_bytes(&m, &h.finalize())
        };

        Self {
            g,
            h,
            lagrange_bases: HashMapCache::new(),
        }
    }
}

impl<G: CommitmentCurve> SRS<G>
where
    <G as CommitmentCurve>::Map: Sync,
    G::BaseField: PrimeField,
{
    /// This function creates an SRS instance in parallel for circuits with a
    /// number of rows up to `depth`.
    pub fn create_parallel(depth: usize) -> Self {
        let m = G::Map::setup();

        let g: Vec<_> = (0..depth)
            .into_par_iter()
            .map(|i| {
                let mut h = Blake2b512::new();
                h.update((i as u32).to_be_bytes());
                point_of_random_bytes(&m, &h.finalize())
            })
            .collect();

        // Compute a blinder
        let h = {
            let mut h = Blake2b512::new();
            h.update("srs_misc".as_bytes());
            // FIXME: This is kept for backward compatibility with a previous
            // version that used a list initialisation. It is not necessary.
            h.update(0_u32.to_be_bytes());
            point_of_random_bytes(&m, &h.finalize())
        };

        Self {
            g,
            h,
            lagrange_bases: HashMapCache::new(),
        }
    }
}

impl<G> SRSTrait<G> for SRS<G>
where
    G: CommitmentCurve,
{
    /// The maximum polynomial degree that can be committed to
    fn max_poly_size(&self) -> usize {
        self.g.len()
    }

    fn blinding_commitment(&self) -> G {
        self.h
    }

    /// Turns a non-hiding polynomial commitment into a hiding polynomial
    /// commitment. Transforms each given `<a, G>` into `(<a, G> + wH, w)` with
    /// a random `w` per commitment.
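    ///
    /// A hedged sketch, assuming an SRS `srs`, a polynomial `p`, and `OsRng`
    /// from the `rand` crate:
    ///
    /// ```ignore
    /// let comm = srs.commit_non_hiding(&p, 1);
    /// let blinded = srs.mask(comm, &mut rand::rngs::OsRng);
    /// // `blinded.commitment` is the hiding commitment, and `blinded.blinders`
    /// // holds the random blinding factors, one per chunk.
    /// ```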
    fn mask(
        &self,
        comm: PolyComm<G>,
        rng: &mut (impl RngCore + CryptoRng),
    ) -> BlindedCommitment<G> {
        let blinders = comm.map(|_| G::ScalarField::rand(rng));
        self.mask_custom(comm, &blinders).unwrap()
    }

    fn mask_custom(
        &self,
        com: PolyComm<G>,
        blinders: &PolyComm<G::ScalarField>,
    ) -> Result<BlindedCommitment<G>, CommitmentError> {
        let commitment = com
            .zip(blinders)
            .ok_or_else(|| CommitmentError::BlindersDontMatch(blinders.len(), com.len()))?
            .map(|(g, b)| {
                let mut g_masked = self.h.mul(b);
                g_masked.add_assign(&g);
                g_masked.into_affine()
            });
        Ok(BlindedCommitment {
            commitment,
            blinders: blinders.clone(),
        })
    }

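    /// Commits to a polynomial without hiding: the coefficients are split into
    /// chunks of at most `self.g.len()` elements, each chunk is committed with
    /// an MSM against `self.g`, and the result is padded with the zero point
    /// up to `num_chunks` chunks.
    ///
    /// A hedged sketch of the chunking behaviour, assuming the Vesta curve and
    /// its scalar field `Fp` from `mina_curves`:
    ///
    /// ```ignore
    /// // An SRS of size 4 and a polynomial with 6 coefficients give 2 chunks.
    /// let srs = SRS::<Vesta>::create(4);
    /// let p = DensePolynomial::from_coefficients_vec(vec![Fp::from(1u64); 6]);
    /// let comm = srs.commit_non_hiding(&p, 2);
    /// assert_eq!(comm.len(), 2);
    /// ```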
    fn commit_non_hiding(
        &self,
        plnm: &DensePolynomial<G::ScalarField>,
        num_chunks: usize,
    ) -> PolyComm<G> {
        let is_zero = plnm.is_zero();

        // chunk while committing
        let mut chunks: Vec<_> = if is_zero {
            vec![G::zero()]
        } else if plnm.len() < self.g.len() {
            vec![G::Group::msm(&self.g[..plnm.len()], &plnm.coeffs)
                .unwrap()
                .into_affine()]
        } else if plnm.len() == self.g.len() {
            // when processing a single chunk, it's faster to parallelise
            // vertically in 2 threads (see the comment to the
            // `benchmark_msm_parallel_vesta` MSM benchmark)
            let n = self.g.len();
            let (r1, r2) = rayon::join(
                || G::Group::msm(&self.g[..n / 2], &plnm.coeffs[..n / 2]).unwrap(),
                || G::Group::msm(&self.g[n / 2..n], &plnm.coeffs[n / 2..n]).unwrap(),
            );

            vec![(r1 + r2).into_affine()]
        } else {
            // otherwise it's better to parallelise horizontally along chunks
            plnm.into_par_iter()
                .chunks(self.g.len())
                .map(|chunk| {
                    let chunk_coeffs = chunk
                        .into_iter()
                        .map(|c| c.into_bigint())
                        .collect::<Vec<_>>();
                    let chunk_res = G::Group::msm_bigint(&self.g, &chunk_coeffs);
                    chunk_res.into_affine()
                })
                .collect()
        };

        for _ in chunks.len()..num_chunks {
            chunks.push(G::zero());
        }

        PolyComm::<G>::new(chunks)
    }

    fn commit(
        &self,
        plnm: &DensePolynomial<G::ScalarField>,
        num_chunks: usize,
        rng: &mut (impl RngCore + CryptoRng),
    ) -> BlindedCommitment<G> {
        self.mask(self.commit_non_hiding(plnm, num_chunks), rng)
    }

    fn commit_custom(
        &self,
        plnm: &DensePolynomial<G::ScalarField>,
        num_chunks: usize,
        blinders: &PolyComm<G::ScalarField>,
    ) -> Result<BlindedCommitment<G>, CommitmentError> {
        self.mask_custom(self.commit_non_hiding(plnm, num_chunks), blinders)
    }

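    /// Commits to a polynomial given in evaluation form over `domain`, using
    /// the cached commitments to the Lagrange basis.
    ///
    /// A hedged sketch, assuming the Vesta curve and its scalar field `Fp`
    /// from `mina_curves`:
    ///
    /// ```ignore
    /// use ark_poly::{EvaluationDomain, Evaluations, Radix2EvaluationDomain};
    ///
    /// let srs = SRS::<Vesta>::create(8);
    /// let domain = Radix2EvaluationDomain::<Fp>::new(8).unwrap();
    /// let evals = Evaluations::from_vec_and_domain(vec![Fp::from(1u64); 8], domain);
    /// let comm = srs.commit_evaluations_non_hiding(domain, &evals);
    /// ```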
    fn commit_evaluations_non_hiding(
        &self,
        domain: D<G::ScalarField>,
        plnm: &Evaluations<G::ScalarField, D<G::ScalarField>>,
    ) -> PolyComm<G> {
        let basis = self.get_lagrange_basis(domain);
        let commit_evaluations = |evals: &Vec<G::ScalarField>, basis: &Vec<PolyComm<G>>| {
            PolyComm::<G>::multi_scalar_mul(&basis.iter().collect::<Vec<_>>()[..], &evals[..])
        };
        match domain.size.cmp(&plnm.domain().size) {
            std::cmp::Ordering::Less => {
                let s = (plnm.domain().size / domain.size) as usize;
                let v: Vec<_> = (0..(domain.size())).map(|i| plnm.evals[s * i]).collect();
                commit_evaluations(&v, basis)
            }
            std::cmp::Ordering::Equal => commit_evaluations(&plnm.evals, basis),
            std::cmp::Ordering::Greater => {
                panic!("desired commitment domain size ({}) is greater than the evaluations' domain size ({})", domain.size, plnm.domain().size)
            }
        }
    }

    fn commit_evaluations(
        &self,
        domain: D<G::ScalarField>,
        plnm: &Evaluations<G::ScalarField, D<G::ScalarField>>,
        rng: &mut (impl RngCore + CryptoRng),
    ) -> BlindedCommitment<G> {
        self.mask(self.commit_evaluations_non_hiding(domain, plnm), rng)
    }

    fn commit_evaluations_custom(
        &self,
        domain: D<G::ScalarField>,
        plnm: &Evaluations<G::ScalarField, D<G::ScalarField>>,
        blinders: &PolyComm<G::ScalarField>,
    ) -> Result<BlindedCommitment<G>, CommitmentError> {
        self.mask_custom(self.commit_evaluations_non_hiding(domain, plnm), blinders)
    }

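    /// Creates an SRS of size `depth` deterministically: each base point is
    /// derived by hashing its index with Blake2b512 and mapping the digest to
    /// the curve, so no trusted setup is required.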
    fn create(depth: usize) -> Self {
        let m = G::Map::setup();

        let g: Vec<_> = (0..depth)
            .map(|i| {
                let mut h = Blake2b512::new();
                h.update((i as u32).to_be_bytes());
                point_of_random_bytes(&m, &h.finalize())
            })
            .collect();

        // Compute a blinder
        let h = {
            let mut h = Blake2b512::new();
            h.update("srs_misc".as_bytes());
            // FIXME: This is kept for backward compatibility with a previous
            // version that used a list initialisation. It is not necessary.
            h.update(0_u32.to_be_bytes());
            point_of_random_bytes(&m, &h.finalize())
        };

        Self {
            g,
            h,
            lagrange_bases: HashMapCache::new(),
        }
    }

    fn get_lagrange_basis_from_domain_size(&self, domain_size: usize) -> &Vec<PolyComm<G>> {
        self.lagrange_bases.get_or_generate(domain_size, || {
            self.lagrange_basis(D::new(domain_size).unwrap())
        })
    }

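    /// Returns the commitments to the Lagrange basis polynomials of `domain`,
    /// computing and caching them on first use.
    ///
    /// A hedged sketch, assuming an SRS `srs` over a curve with scalar field
    /// `Fp`:
    ///
    /// ```ignore
    /// use ark_poly::{EvaluationDomain, Radix2EvaluationDomain};
    ///
    /// let domain = Radix2EvaluationDomain::<Fp>::new(srs.size()).unwrap();
    /// let basis = srs.get_lagrange_basis(domain);
    /// assert_eq!(basis.len(), domain.size());
    /// ```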
    fn get_lagrange_basis(&self, domain: D<G::ScalarField>) -> &Vec<PolyComm<G>> {
        self.lagrange_bases
            .get_or_generate(domain.size(), || self.lagrange_basis(domain))
    }

    fn size(&self) -> usize {
        self.g.len()
    }
}

impl<G: CommitmentCurve> SRS<G> {
    #[allow(clippy::type_complexity)]
    #[allow(clippy::many_single_char_names)]
    // NB: A slight modification to the original protocol is made when
    // absorbing the first prover message, to improve efficiency in a
    // recursive setting.
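    /// Produces an opening proof for the polynomials in `plnms`, evaluated at
    /// the points in `elm`. The polynomials (and their blinders) are combined
    /// with powers of `polyscale`, the evaluation points are combined with
    /// powers of `evalscale`, `sponge` drives the Fiat-Shamir transcript, and
    /// `rng` supplies the blinding randomness.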
    pub fn open<EFqSponge, RNG, D: EvaluationDomain<G::ScalarField>>(
        &self,
        group_map: &G::Map,
        plnms: PolynomialsToCombine<G, D>,
        elm: &[G::ScalarField],
        polyscale: G::ScalarField,
        evalscale: G::ScalarField,
        mut sponge: EFqSponge,
        rng: &mut RNG,
    ) -> OpeningProof<G>
    where
        EFqSponge: Clone + FqSponge<G::BaseField, G, G::ScalarField>,
        RNG: RngCore + CryptoRng,
        G::BaseField: PrimeField,
        G: EndoCurve,
    {
        let (endo_q, endo_r) = endos::<G>();

        let rounds = math::ceil_log2(self.g.len());
        let padded_length = 1 << rounds;

        // TODO: Trim this to the degree of the largest polynomial
        // TODO: In practice we always assume the SRS size is a power of 2.
        // Therefore, the padding equals zero, and this code could be removed.
        // Only one current test case uses an SRS with a non-power-of-2 size.
        let padding = padded_length - self.g.len();
        let mut g = self.g.clone();
        g.extend(vec![G::zero(); padding]);

        // Combines polynomials roughly as follows: p(X) := ∑_i polyscale^i p_i(X)
        //
        // `blinding_factor` is the combination of the blinders paired with the
        // polynomials in `plnms`. In kimchi, these input blinders are the
        // polynomial-commitment blinders, so often `[G::ScalarField::one();
        // num_chunks]` or zeroes.
        let (p, blinding_factor) = combine_polys::<G, D>(plnms, polyscale, self.g.len());

        // The initial evaluation vector for the polynomial commitment, b_init,
        // is not just the powers of a single point as in the original IPA
        // (1, ζ, ζ^2, ...)
        //
        // but rather a vector of linearly combined powers with `evalscale` as
        // recombiner.
        //
        // b_init[j] = Σ_i evalscale^i elm_i^j
        //           = ζ^j + evalscale * (ζω)^j (in the specific case of challenges (ζ, ζω))
        //
        // So in our case b_init is the following vector:
        //    1 + evalscale
        //    ζ + evalscale * ζω
        //    ζ^2 + evalscale * (ζω)^2
        //    ζ^3 + evalscale * (ζω)^3
        //    ...
        let b_init = {
            // randomise/scale the eval powers
            let mut scale = G::ScalarField::one();
            let mut res: Vec<G::ScalarField> =
                (0..padded_length).map(|_| G::ScalarField::zero()).collect();
            for e in elm {
                for (i, t) in pows(padded_length, *e).iter().enumerate() {
                    res[i] += &(scale * t);
                }
                scale *= &evalscale;
            }
            res
        };

        // Combined polynomial p(X) evaluated at the combined eval point b_init.
        let combined_inner_product = p
            .coeffs
            .iter()
            .zip(b_init.iter())
            .map(|(a, b)| *a * b)
            .fold(G::ScalarField::zero(), |acc, x| acc + x);

        // Usually the prover would send `combined_inner_product` to the
        // verifier, so we should absorb `combined_inner_product`. However, it
        // is more efficient in the recursion circuit to absorb a slightly
        // modified version of it. As a reminder, in a recursive setting, the
        // challenges are given as a public input and verified in the next
        // iteration. See the `shift_scalar` documentation.
        sponge.absorb_fr(&[shift_scalar::<G>(combined_inner_product)]);

        // Generate another randomisation base U; our commitments will be w.r.t.
        // the bases {G_i}, H, U.
        let u_base: G = {
            let t = sponge.challenge_fq();
            let (x, y) = group_map.to_group(t);
            G::of_coordinates(x, y)
        };

        let mut a = p.coeffs;
        assert!(padded_length >= a.len());
        a.extend(vec![G::ScalarField::zero(); padded_length - a.len()]);

        let mut b = b_init;

        let mut lr = vec![];

        let mut blinders = vec![];

        let mut chals = vec![];
        let mut chal_invs = vec![];

        // The main IPA folding loop that has log iterations.
        for _ in 0..rounds {
            let n = g.len() / 2;
            // Pedersen bases
            let (g_lo, g_hi) = (&g[0..n], &g[n..]);
            // Polynomial coefficients
            let (a_lo, a_hi) = (&a[0..n], &a[n..]);
            // Evaluation points
            let (b_lo, b_hi) = (&b[0..n], &b[n..]);

            // Blinders for L/R
            let rand_l = <G::ScalarField as UniformRand>::rand(rng);
            let rand_r = <G::ScalarField as UniformRand>::rand(rng);

            // Pedersen commitment to (a_hi, rand_l, <a_hi, b_lo>) over the
            // bases (g_lo, H, U)
            let l = G::Group::msm_bigint(
                &[g_lo, &[self.h, u_base]].concat(),
                &[a_hi, &[rand_l, inner_prod(a_hi, b_lo)]]
                    .concat()
                    .iter()
                    .map(|x| x.into_bigint())
                    .collect::<Vec<_>>(),
            )
            .into_affine();

            // Pedersen commitment to (a_lo, rand_r, <a_lo, b_hi>) over the
            // bases (g_hi, H, U)
            let r = G::Group::msm_bigint(
                &[g_hi, &[self.h, u_base]].concat(),
                &[a_lo, &[rand_r, inner_prod(a_lo, b_hi)]]
                    .concat()
                    .iter()
                    .map(|x| x.into_bigint())
                    .collect::<Vec<_>>(),
            )
            .into_affine();

            lr.push((l, r));
            blinders.push((rand_l, rand_r));

            sponge.absorb_g(&[l]);
            sponge.absorb_g(&[r]);

            // Round #i challenges;
            // - not to be confused with "u_base"
            // - not to be confused with "u" as "polyscale"
            let u_pre = squeeze_prechallenge(&mut sponge);
            let u = u_pre.to_field(&endo_r);
            let u_inv = u.inverse().unwrap();

            chals.push(u);
            chal_invs.push(u_inv);

            // IPA-folding polynomial coefficients
            a = a_hi
                .par_iter()
                .zip(a_lo)
                .map(|(&hi, &lo)| {
                    // lo + u_inv * hi
                    let mut res = hi;
                    res *= u_inv;
                    res += &lo;
                    res
                })
                .collect();

            // IPA-folding evaluation points
            b = b_lo
                .par_iter()
                .zip(b_hi)
                .map(|(&lo, &hi)| {
                    // lo + u * hi
                    let mut res = hi;
                    res *= u;
                    res += &lo;
                    res
                })
                .collect();

            // IPA-folding bases
            g = G::combine_one_endo(endo_r, endo_q, g_lo, g_hi, u_pre);
        }

        assert!(
            g.len() == 1 && a.len() == 1 && b.len() == 1,
            "IPA commitment folding must produce single elements after log rounds"
        );
        let a0 = a[0];
        let b0 = b[0];
        let g0 = g[0];

        // Compute r_prime, a folded blinder. It combines the blinders of
        // each individual step of the IPA folding process together
        // with the final blinding_factor of the polynomial.
        //
        // r_prime := Σ_i (rand_l[i] * u[i]^{-1} + rand_r[i] * u[i])
        //          + blinding_factor
        //
        // where u is the vector of folding challenges, and rand_l/rand_r are
        // the intermediate L/R blinders.
        let r_prime = blinders
            .iter()
            .zip(chals.iter().zip(chal_invs.iter()))
            .map(|((rand_l, rand_r), (u, u_inv))| ((*rand_l) * u_inv) + (*rand_r * u))
            .fold(blinding_factor, |acc, x| acc + x);

        let d = <G::ScalarField as UniformRand>::rand(rng);
        let r_delta = <G::ScalarField as UniformRand>::rand(rng);

        // Compute delta, the commitment
        // delta = [d] G0 + [b0 * d] U_base + [r_delta] H
        // (in additive notation)
        let delta = ((g0.into_group() + (u_base.mul(b0))).into_affine().mul(d)
            + self.h.mul(r_delta))
        .into_affine();

        sponge.absorb_g(&[delta]);
        let c = ScalarChallenge(sponge.challenge()).to_field(&endo_r);

        // Schnorr-like responses showing knowledge of `a0` and `r_prime`.
        let z1 = a0 * c + d;
        let z2 = r_prime * c + r_delta;

        OpeningProof {
            delta,
            lr,
            z1,
            z2,
            sg: g0,
        }
    }

    fn lagrange_basis(&self, domain: D<G::ScalarField>) -> Vec<PolyComm<G>> {
        let n = domain.size();

        // Let V be a vector space over the field F.
        //
        // Given
        // - a domain [ 1, w, w^2, ..., w^{n - 1} ]
        // - a vector v := [ v_0, ..., v_{n - 1} ] in V^n
        //
        // the FFT algorithm computes the matrix application
        //
        // u = M(w) * v
        //
        // where
        // M(w) =
        //   1 1       1           ... 1
        //   1 w       w^2         ... w^{n-1}
        //   ...
        //   1 w^{n-1} (w^2)^{n-1} ... (w^{n-1})^{n-1}
        //
        // The IFFT algorithm computes
        //
        // v = M(w)^{-1} * u
        //
        // Let's see how we can use this algorithm to compute the Lagrange basis
        // commitments.
        //
        // Let V be the vector space F[x] of polynomials in x over F.
        // Let v in V be the vector [ L_0, ..., L_{n - 1} ] where L_i is the i^{th}
        // normalized Lagrange polynomial (where L_i(w^j) = j == i ? 1 : 0).
        //
        // Consider the rows of M(w) * v. Let me write out the matrix and vector
        // so you can see more easily.
        //
        //   | 1 1       1           ... 1               |   | L_0     |
        //   | 1 w       w^2         ... w^{n-1}         | * | L_1     |
        //   | ...                                       |   | ...     |
        //   | 1 w^{n-1} (w^2)^{n-1} ... (w^{n-1})^{n-1} |   | L_{n-1} |
        //
        // The 0th row is L_0 + L_1 + ... + L_{n - 1}. So, it's the polynomial
        // that has the value 1 on every element of the domain.
        // In other words, it's the polynomial 1.
        //
        // The 1st row is L_0 + w L_1 + ... + w^{n - 1} L_{n - 1}. So, it's the
        // polynomial which has value w^i on w^i.
        // In other words, it's the polynomial x.
        //
        // In general, you can see that row i is in fact the polynomial x^i.
        //
        // Thus, M(w) * v is the vector u, where u = [ 1, x, x^2, ..., x^{n-1} ]
        //
        // Therefore, the IFFT algorithm, when applied to the vector u (the
        // standard monomial basis) will yield the vector v of the (normalized)
        // Lagrange polynomials.
        //
        // Now, because the polynomial commitment scheme is additively
        // homomorphic, and because the commitment to the polynomial x^i is just
        // self.g[i], we can obtain commitments to the normalized Lagrange
        // polynomials by applying IFFT to the vector self.g[0..n].
        //
        //
        // Further still, we can do the same trick for 'chunked' polynomials.
        //
        // Recall that a chunked polynomial is some f of degree k*n - 1 with
        // f(x) = f_0(x) + x^n f_1(x) + ... + x^{(k-1) n} f_{k-1}(x)
        // where each f_i has degree n-1.
        //
        // In the above, if we set u = [ 1, x, x^2, ..., x^{n-1}, 0, 0, ..., 0 ]
        // then we effectively 'zero out' any polynomial terms higher than
        // x^{n-1}, leaving us with the 'partial Lagrange polynomials' that
        // contribute to f_0.
        //
        // Similarly, u = [ 0, 0, ..., 0, 1, x, x^2, ..., x^{n-1}, 0, 0, ..., 0 ]
        // with n leading zeros 'zeroes out' all terms except the 'partial
        // Lagrange polynomials' that contribute to f_1, and likewise for each
        // f_i.
        //
        // By computing each of these, and recollecting the terms as a vector of
        // polynomial commitments, we obtain a chunked commitment to the L_i
        // polynomials.
        let srs_size = self.g.len();
        let num_elems = (n + srs_size - 1) / srs_size;
        let mut chunks = Vec::with_capacity(num_elems);

        // For each chunk
        for i in 0..num_elems {
            // Initialize the vector with zero curve points
            let mut lg: Vec<<G as AffineRepr>::Group> = vec![<G as AffineRepr>::Group::zero(); n];
            // Overwrite the terms corresponding to that chunk with the SRS
            // curve points
            let start_offset = i * srs_size;
            let num_terms = min((i + 1) * srs_size, n) - start_offset;
            for j in 0..num_terms {
                lg[start_offset + j] = self.g[j].into_group()
            }
            // Apply the IFFT
            domain.ifft_in_place(&mut lg);
            // Append the 'partial Lagrange polynomials' to the vector of
            // chunks
            chunks.push(<G as AffineRepr>::Group::normalize_batch(lg.as_mut_slice()));
        }

        (0..n)
            .map(|i| PolyComm {
                chunks: chunks.iter().map(|v| v[i]).collect(),
            })
            .collect()
    }
}

#[serde_as]
#[derive(Clone, Debug, Serialize, Deserialize, Default, PartialEq)]
#[serde(bound = "G: ark_serialize::CanonicalDeserialize + ark_serialize::CanonicalSerialize")]
pub struct OpeningProof<G: AffineRepr> {
    /// Vector of rounds of L & R commitments
    #[serde_as(as = "Vec<(o1_utils::serialization::SerdeAs, o1_utils::serialization::SerdeAs)>")]
    pub lr: Vec<(G, G)>,
    /// The Schnorr-like commitment (first message) of the final check
    #[serde_as(as = "o1_utils::serialization::SerdeAs")]
    pub delta: G,
    /// Schnorr-like response tied to the folded coefficient `a0`
    #[serde_as(as = "o1_utils::serialization::SerdeAs")]
    pub z1: G::ScalarField,
    /// Schnorr-like response tied to the folded blinder `r_prime`
    #[serde_as(as = "o1_utils::serialization::SerdeAs")]
    pub z2: G::ScalarField,
    /// A final folded commitment base
    #[serde_as(as = "o1_utils::serialization::SerdeAs")]
    pub sg: G,
}

impl<BaseField: PrimeField, G: AffineRepr<BaseField = BaseField> + CommitmentCurve + EndoCurve>
    crate::OpenProof<G> for OpeningProof<G>
{
    type SRS = SRS<G>;

    fn open<EFqSponge, RNG, D: EvaluationDomain<<G as AffineRepr>::ScalarField>>(
        srs: &Self::SRS,
        group_map: &<G as CommitmentCurve>::Map,
        plnms: PolynomialsToCombine<G, D>,
        elm: &[<G as AffineRepr>::ScalarField],
        polyscale: <G as AffineRepr>::ScalarField,
        evalscale: <G as AffineRepr>::ScalarField,
        sponge: EFqSponge,
        rng: &mut RNG,
    ) -> Self
    where
        EFqSponge:
            Clone + FqSponge<<G as AffineRepr>::BaseField, G, <G as AffineRepr>::ScalarField>,
        RNG: RngCore + CryptoRng,
    {
        srs.open(group_map, plnms, elm, polyscale, evalscale, sponge, rng)
    }

    fn verify<EFqSponge, RNG>(
        srs: &Self::SRS,
        group_map: &G::Map,
        batch: &mut [BatchEvaluationProof<G, EFqSponge, Self>],
        rng: &mut RNG,
    ) -> bool
    where
        EFqSponge: FqSponge<<G as AffineRepr>::BaseField, G, <G as AffineRepr>::ScalarField>,
        RNG: RngCore + CryptoRng,
    {
        srs.verify(group_map, batch, rng)
    }
}

/// Commitment round challenges (endo mapped) and their inverses.
pub struct Challenges<F> {
    pub chal: Vec<F>,
    pub chal_inv: Vec<F>,
}

impl<G: AffineRepr> OpeningProof<G> {
    /// Computes a log-sized vector of scalar challenges used for
    /// recombining elements inside the IPA.
    pub fn prechallenges<EFqSponge: FqSponge<G::BaseField, G, G::ScalarField>>(
        &self,
        sponge: &mut EFqSponge,
    ) -> Vec<ScalarChallenge<G::ScalarField>> {
        let _t = sponge.challenge_fq();
        self.lr
            .iter()
            .map(|(l, r)| {
                sponge.absorb_g(&[*l]);
                sponge.absorb_g(&[*r]);
                squeeze_prechallenge(sponge)
            })
            .collect()
    }

    /// Same as `prechallenges`, but maps the scalar challenges using the
    /// provided endomorphism coefficient, and also computes their inverses.
    pub fn challenges<EFqSponge: FqSponge<G::BaseField, G, G::ScalarField>>(
        &self,
        endo_r: &G::ScalarField,
        sponge: &mut EFqSponge,
    ) -> Challenges<G::ScalarField> {
        let chal: Vec<_> = self
            .lr
            .iter()
            .map(|(l, r)| {
                sponge.absorb_g(&[*l]);
                sponge.absorb_g(&[*r]);
                squeeze_challenge(endo_r, sponge)
            })
            .collect();

        let chal_inv = {
            let mut cs = chal.clone();
            ark_ff::batch_inversion(&mut cs);
            cs
        };

        Challenges { chal, chal_inv }
    }
}

#[cfg(feature = "ocaml_types")]
pub mod caml {
    use super::OpeningProof;
    use ark_ec::AffineRepr;
    use ocaml;

    #[derive(ocaml::IntoValue, ocaml::FromValue, ocaml_gen::Struct)]
    pub struct CamlOpeningProof<G, F> {
        /// Vector of rounds of L & R commitments
        pub lr: Vec<(G, G)>,
        pub delta: G,
        pub z1: F,
        pub z2: F,
        pub sg: G,
    }

    impl<G, CamlF, CamlG> From<OpeningProof<G>> for CamlOpeningProof<CamlG, CamlF>
    where
        G: AffineRepr,
        CamlG: From<G>,
        CamlF: From<G::ScalarField>,
    {
        fn from(opening_proof: OpeningProof<G>) -> Self {
            Self {
                lr: opening_proof
                    .lr
                    .into_iter()
                    .map(|(g1, g2)| (CamlG::from(g1), CamlG::from(g2)))
                    .collect(),
                delta: CamlG::from(opening_proof.delta),
                z1: opening_proof.z1.into(),
                z2: opening_proof.z2.into(),
                sg: CamlG::from(opening_proof.sg),
            }
        }
    }

    impl<G, CamlF, CamlG> From<CamlOpeningProof<CamlG, CamlF>> for OpeningProof<G>
    where
        G: AffineRepr,
        CamlG: Into<G>,
        CamlF: Into<G::ScalarField>,
    {
        fn from(caml: CamlOpeningProof<CamlG, CamlF>) -> Self {
            Self {
                lr: caml
                    .lr
                    .into_iter()
                    .map(|(g1, g2)| (g1.into(), g2.into()))
                    .collect(),
                delta: caml.delta.into(),
                z1: caml.z1.into(),
                z2: caml.z2.into(),
                sg: caml.sg.into(),
            }
        }
    }
}