1#[cfg(not(feature = "std"))]
9use crate::collections::HashMap;
10#[cfg(feature = "std")]
11use crate::hash_map_cache::HashMapCache;
12#[cfg(feature = "std")]
13use crate::precomputed_srs::TestSRS;
14use crate::{
15 commitment::{
16 b_poly, b_poly_coefficients, combine_commitments, shift_scalar, squeeze_challenge,
17 squeeze_prechallenge, BatchEvaluationProof, BlindedCommitment, CommitmentCurve, EndoCurve,
18 PolyComm,
19 },
20 error::CommitmentError,
21 SRS as SRSTrait,
22};
23#[cfg(feature = "std")]
24use crate::{utils::combine_polys, PolynomialsToCombine};
25use alloc::{vec, vec::Vec};
26use ark_ec::{AffineRepr, CurveGroup, VariableBaseMSM};
27#[cfg(feature = "std")]
28use ark_ff::{BigInteger, Field};
29use ark_ff::{One, PrimeField, UniformRand, Zero};
30#[cfg(feature = "std")]
31use ark_poly::{univariate::DensePolynomial, Evaluations};
32use ark_poly::{EvaluationDomain, Radix2EvaluationDomain as D};
33use ark_serialize::{CanonicalDeserialize, CanonicalSerialize};
34#[cfg(feature = "std")]
35use blake2::{Blake2b512, Digest};
36use core::{
37 cmp::min,
38 ops::{AddAssign, Deref},
39};
40use groupmap::GroupMap;
41use mina_poseidon::{sponge::ScalarChallenge, FqSponge};
42#[cfg(feature = "std")]
43use o1_utils::field_helpers::{inner_prod, pows};
44use o1_utils::math;
45use rand_core::{CryptoRng, RngCore};
46#[cfg(feature = "parallel")]
47use rayon::prelude::*;
48use serde::{Deserialize, Serialize};
49use serde_with::serde_as;
50#[cfg(feature = "std")]
51use zeroize::Zeroize;
52
/// An ("unstructured") reference string for the inner-product-argument
/// commitment scheme: a list of independent group generators plus a
/// dedicated blinding generator.
#[serde_as]
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
#[serde(bound = "G: CanonicalDeserialize + CanonicalSerialize")]
pub struct SRS<G> {
    /// The group generators used to commit to polynomial coefficients
    /// (one generator per coefficient).
    #[serde_as(as = "Vec<o1_utils::serialization::SerdeAs>")]
    pub g: Vec<G>,

    /// The generator used for blinding commitments.
    #[serde_as(as = "o1_utils::serialization::SerdeAs")]
    pub h: G,

    /// Cache of Lagrange-basis commitments keyed by domain size.
    /// Skipped during (de)serialization; recomputed lazily on demand.
    #[cfg(feature = "std")]
    #[serde(skip)]
    lagrange_bases: HashMapCache<usize, Vec<PolyComm<G>>>,
    /// no-std variant of the Lagrange-basis cache (see [`LagrangeCache`]).
    #[cfg(not(feature = "std"))]
    #[serde(skip)]
    lagrange_bases: LagrangeCache<G>,
}
76
/// no-std replacement for `HashMapCache`: a shared, interior-mutable map
/// from domain size to reference-counted Lagrange-basis commitments.
#[cfg(not(feature = "std"))]
type LagrangeCache<G> =
    alloc::rc::Rc<core::cell::RefCell<HashMap<usize, alloc::rc::Rc<Vec<PolyComm<G>>>>>>;
80
/// Minimal "get or compute and cache" interface mirroring the std
/// `HashMapCache::get_or_generate` API for the no-std cache type.
#[cfg(not(feature = "std"))]
trait GetOrGenExt<G> {
    /// Returns the cached value for `key`, calling `generator` (at most
    /// once) to populate the cache on a miss.
    fn get_or_generate<F: FnOnce() -> Vec<PolyComm<G>>>(
        &self,
        key: usize,
        generator: F,
    ) -> alloc::rc::Rc<Vec<PolyComm<G>>>;
}
89
90#[cfg(not(feature = "std"))]
91impl<G> GetOrGenExt<G> for LagrangeCache<G> {
92 fn get_or_generate<F: FnOnce() -> Vec<PolyComm<G>>>(
93 &self,
94 key: usize,
95 generator: F,
96 ) -> alloc::rc::Rc<Vec<PolyComm<G>>> {
97 let mut map = self.borrow_mut();
98 let entry = map
99 .entry(key)
100 .or_insert_with(|| alloc::rc::Rc::new(generator()));
101 alloc::rc::Rc::clone(entry)
102 }
103}
104
// SAFETY(review): `LagrangeCache` is built from `Rc`/`RefCell`, which are
// NOT thread-safe, so these impls are only sound if the no-std build is
// never shared across threads — TODO confirm that invariant holds for all
// no-std targets that consume this crate.
#[cfg(not(feature = "std"))]
#[allow(unsafe_code, clippy::non_send_fields_in_send_ty)]
unsafe impl<G: Send> Send for SRS<G> {}
#[cfg(not(feature = "std"))]
#[allow(unsafe_code)]
unsafe impl<G: Sync> Sync for SRS<G> {}
113
#[cfg(feature = "std")]
impl<G> From<TestSRS<G>> for SRS<G> {
    /// Converts a precomputed test SRS into a real one, rebuilding the
    /// Lagrange-basis cache from the fixture's map.
    fn from(value: TestSRS<G>) -> Self {
        // This impl only exists under `std`, so the former
        // `#[cfg(not(feature = "std"))]` field initializer in this body was
        // dead code and has been removed.
        Self {
            g: value.g,
            h: value.h,
            lagrange_bases: HashMapCache::new_from_hashmap(value.lagrange_bases),
        }
    }
}
127
#[cfg(feature = "std")]
impl<G: Clone> From<SRS<G>> for TestSRS<G> {
    /// Snapshots an SRS — including whatever Lagrange bases are currently
    /// cached — into a serializable test fixture.
    fn from(value: SRS<G>) -> Self {
        // Clone the cache before `value.g` / `value.h` are moved below.
        let cached_bases = value.lagrange_bases().clone();

        Self {
            g: value.g,
            h: value.h,
            lagrange_bases: cached_bases.into(),
        }
    }
}
140
impl<G> SRS<G> {
    /// Builds an SRS from explicit generators, starting with an empty
    /// Lagrange-basis cache.
    #[cfg(feature = "std")]
    pub fn new(g: Vec<G>, h: G) -> Self {
        Self {
            g,
            h,
            lagrange_bases: HashMapCache::new(),
        }
    }

    /// Read access to the Lagrange-basis cache (std variant).
    #[cfg(feature = "std")]
    pub const fn lagrange_bases(&self) -> &HashMapCache<usize, Vec<PolyComm<G>>> {
        &self.lagrange_bases
    }

    /// Read access to the Lagrange-basis cache (no-std variant).
    #[cfg(not(feature = "std"))]
    pub const fn lagrange_bases(&self) -> &LagrangeCache<G> {
        &self.lagrange_bases
    }
}
161
impl<G> PartialEq for SRS<G>
where
    G: PartialEq,
{
    /// Two SRS are equal iff their generators agree. The `lagrange_bases`
    /// field is a derived cache and is deliberately excluded from equality.
    fn eq(&self, other: &Self) -> bool {
        self.g == other.g && self.h == other.h
    }
}
170
171#[must_use]
214pub fn endos<G: CommitmentCurve>() -> (G::BaseField, G::ScalarField)
215where
216 G::BaseField: PrimeField,
217{
218 let endo_q: G::BaseField = mina_poseidon::sponge::endo_coefficient();
219 let endo_r = {
220 let potential_endo_r: G::ScalarField = mina_poseidon::sponge::endo_coefficient();
221 let t = G::generator();
222 let (x, y) = t.to_coordinates().unwrap();
223 let phi_t = G::of_coordinates(x * endo_q, y);
224 if t.mul(potential_endo_r) == phi_t.into_group() {
225 potential_endo_r
226 } else {
227 potential_endo_r * potential_endo_r
228 }
229 };
230 (endo_q, endo_r)
231}
232
/// Deterministically maps `random_bytes` to a point of the prime-order
/// subgroup via the group map, consuming 31 bytes per base-prime-field
/// element (31 bytes always fit below the modulus, so `from_bigint` cannot
/// fail).
///
/// NOTE(review): this reads `N * extension_degree` bytes from
/// `random_bytes`; callers pass a Blake2b-512 digest (64 bytes), which
/// suffices for extension degree <= 2 — confirm for larger extension
/// fields.
#[cfg(feature = "std")]
fn point_of_random_bytes<G: CommitmentCurve>(map: &G::Map, random_bytes: &[u8]) -> G
where
    G::BaseField: Field,
{
    // Bytes consumed per base-prime-field element.
    const N: usize = 31;
    #[allow(clippy::cast_possible_truncation)]
    let extension_degree = G::BaseField::extension_degree() as usize;

    // NOTE(review): only `extension_degree` elements are pushed, so this
    // capacity looks oversized — harmless, but likely meant to be
    // `extension_degree`.
    let mut base_fields = Vec::with_capacity(N * extension_degree);

    for base_count in 0..extension_degree {
        let mut bits = [false; 8 * N];
        let offset = base_count * N;
        for i in 0..N {
            for j in 0..8 {
                // Bits are extracted LSB-first within each byte.
                bits[8 * i + j] = (random_bytes[offset + i] >> j) & 1 == 1;
            }
        }

        let n =
            <<G::BaseField as Field>::BasePrimeField as PrimeField>::BigInt::from_bits_be(&bits);
        let t = <<G::BaseField as Field>::BasePrimeField as PrimeField>::from_bigint(n)
            .expect("packing code has a bug");
        base_fields.push(t);
    }

    let t = G::BaseField::from_base_prime_field_elems(base_fields).unwrap();

    // Map the field element onto the curve and clear the cofactor so the
    // result lands in the prime-order subgroup.
    let (x, y) = map.to_group(t);
    G::of_coordinates(x, y).mul_by_cofactor()
}
266
impl<G: CommitmentCurve> SRS<G> {
    /// Verifies a batch of IPA opening proofs with a single multi-scalar
    /// multiplication: each proof contributes terms (randomized by fresh
    /// per-proof scalars so proofs cannot cancel each other) to one big
    /// linear combination, and the batch is accepted iff it sums to the
    /// group identity.
    ///
    /// Returns `true` when every proof in `batch` verifies.
    #[allow(clippy::too_many_lines)]
    pub fn verify<EFqSponge, RNG, const FULL_ROUNDS: usize>(
        &self,
        group_map: &G::Map,
        batch: &mut [BatchEvaluationProof<
            G,
            EFqSponge,
            OpeningProof<G, FULL_ROUNDS>,
            FULL_ROUNDS,
        >],
        rng: &mut RNG,
    ) -> bool
    where
        EFqSponge: FqSponge<G::BaseField, G, G::ScalarField, FULL_ROUNDS>,
        RNG: RngCore + CryptoRng,
        G::BaseField: PrimeField,
    {
        let nonzero_length = self.g.len();

        // IPA folding needs a power-of-two basis; pad with identity points.
        let max_rounds = math::ceil_log2(nonzero_length);

        let padded_length = 1 << max_rounds;

        let (_, endo_r) = endos::<G>();

        let padding = padded_length - nonzero_length;
        // points[0] is the blinding generator h; points[1..] are the g_i.
        let mut points = vec![self.h];
        points.extend(self.g.clone());
        points.extend(vec![G::zero(); padding]);

        let mut scalars = vec![G::ScalarField::zero(); padded_length + 1];
        assert_eq!(scalars.len(), points.len());

        // Fresh batch randomizers; powers of these weight each proof.
        let rand_base = G::ScalarField::rand(rng);
        let sg_rand_base = G::ScalarField::rand(rng);

        let mut rand_base_i = G::ScalarField::one();
        let mut sg_rand_base_i = G::ScalarField::one();

        for BatchEvaluationProof {
            sponge,
            evaluation_points,
            polyscale,
            evalscale,
            evaluations,
            opening,
            combined_inner_product,
        } in batch.iter_mut()
        {
            // Replay the Fiat-Shamir transcript the prover committed to.
            sponge.absorb_fr(&[shift_scalar::<G>(*combined_inner_product)]);

            // Derive the auxiliary generator U from the sponge state.
            let u_base: G = {
                let t = sponge.challenge_fq();
                let (x, y) = group_map.to_group(t);
                G::of_coordinates(x, y)
            };

            let Challenges { chal, chal_inv } = opening.challenges::<EFqSponge>(&endo_r, sponge);

            sponge.absorb_g(&[opening.delta]);
            let c = ScalarChallenge::new(sponge.challenge()).to_field(&endo_r);

            // b0 = sum_j evalscale^j * b_poly(chal, evaluation_points[j]).
            let b0 = {
                let mut scale = G::ScalarField::one();
                let mut res = G::ScalarField::zero();
                for &e in evaluation_points.iter() {
                    let term = b_poly(&chal, e);
                    res += &(scale * term);
                    scale *= *evalscale;
                }
                res
            };

            // Coefficients of the challenge polynomial prod_i (1 + u_i x^{2^i}).
            let s = b_poly_coefficients(&chal);

            let neg_rand_base_i = -rand_base_i;

            points.push(opening.sg);
            scalars.push(neg_rand_base_i * opening.z1 - sg_rand_base_i);

            {
                // Accumulate sg_rand_base_i * s onto the SRS generators'
                // scalars (offset by 1 to skip the h slot at index 0).
                let terms: Vec<_> = o1_utils::cfg_iter!(s).map(|s| sg_rand_base_i * s).collect();

                for (i, term) in terms.iter().enumerate() {
                    scalars[i + 1] += term;
                }
            }

            scalars[0] -= &(rand_base_i * opening.z2);

            scalars.push(neg_rand_base_i * (opening.z1 * b0));
            points.push(u_base);

            let rand_base_i_c_i = c * rand_base_i;
            // Fold in the per-round L/R commitments with u^-1 / u weights.
            for ((l, r), (u_inv, u)) in opening.lr.iter().zip(chal_inv.iter().zip(chal.iter())) {
                points.push(*l);
                scalars.push(rand_base_i_c_i * u_inv);

                points.push(*r);
                scalars.push(rand_base_i_c_i * u);
            }

            // Add the polyscale-combined evaluation commitments.
            combine_commitments(
                evaluations,
                &mut scalars,
                &mut points,
                *polyscale,
                rand_base_i_c_i,
            );

            scalars.push(rand_base_i_c_i * *combined_inner_product);
            points.push(u_base);

            scalars.push(rand_base_i);
            points.push(opening.delta);

            // Advance the per-proof randomizer powers.
            rand_base_i *= &rand_base;
            sg_rand_base_i *= &sg_rand_base;
        }

        // One MSM over all accumulated terms; the batch verifies iff it is
        // the identity.
        let msm_res = {
            #[cfg(feature = "parallel")]
            {
                // Split into two halves computed in parallel, then summed.
                let chunk_size = points.len() / 2;
                points
                    .into_par_iter()
                    .chunks(chunk_size)
                    .zip(scalars.into_par_iter().chunks(chunk_size))
                    .map(|(bases, coeffs)| {
                        let coeffs_bigint = coeffs
                            .into_iter()
                            .map(ark_ff::PrimeField::into_bigint)
                            .collect::<Vec<_>>();
                        G::Group::msm_bigint(&bases, &coeffs_bigint)
                    })
                    .reduce(G::Group::zero, |mut l, r| {
                        l += r;
                        l
                    })
            }
            #[cfg(not(feature = "parallel"))]
            {
                let scalars_bigint: Vec<_> = scalars.iter().map(|x| x.into_bigint()).collect();
                G::Group::msm_bigint(&points, &scalars_bigint)
            }
        };

        msm_res == G::Group::zero()
    }
}
504
#[cfg(feature = "std")]
impl<G: CommitmentCurve> SRS<G> {
    /// Creates an SRS from a known secret scalar `x` ("toxic waste"):
    /// `g[i] = x^i * P` for the curve generator `P`. Anyone who knows `x`
    /// can forge openings, so this must only be used in tests.
    ///
    /// The blinding generator `h` is still derived by hashing (as in
    /// `create`), so it has no known discrete log relative to `g`.
    pub fn create_trusted_setup_with_toxic_waste(x: G::ScalarField, depth: usize) -> Self {
        let m = G::Map::setup();

        // Work on a local copy so the secret can be scrubbed below.
        let mut x = x;
        let mut x_pow = G::ScalarField::one();
        let g: Vec<_> = (0..depth)
            .map(|_| {
                let res = G::generator().mul(x_pow);
                x_pow *= x;
                res.into_affine()
            })
            .collect();

        // Best-effort scrubbing of the secret material held by this frame.
        // Field elements are `Copy`, so other copies may still exist, but
        // previously only `x_pow` was zeroized while the local `x` itself
        // was left in memory.
        x_pow.zeroize();
        x.zeroize();

        // Blinding generator from an index-free domain-separated
        // transcript, distinct from every g[i].
        let h = {
            let mut h = Blake2b512::new();
            h.update(b"srs_misc");
            h.update(0_u32.to_be_bytes());
            point_of_random_bytes(&m, &h.finalize())
        };

        Self {
            g,
            h,
            lagrange_bases: HashMapCache::new(),
        }
    }
}
547
#[cfg(feature = "parallel")]
impl<G: CommitmentCurve> SRS<G>
where
    <G as CommitmentCurve>::Map: Sync,
    G::BaseField: PrimeField,
{
    /// Creates an SRS of size `depth`, hashing each generator index to a
    /// curve point in parallel. Produces the same SRS as the sequential
    /// `create`.
    #[must_use]
    pub fn create_parallel(depth: usize) -> Self {
        let group_map = G::Map::setup();

        // Each generator is derived from Blake2b-512 of its index.
        let index_to_point = |index: u32| {
            let mut hasher = Blake2b512::new();
            hasher.update(index.to_be_bytes());
            point_of_random_bytes(&group_map, &hasher.finalize())
        };

        #[allow(clippy::cast_possible_truncation)]
        let g: Vec<_> = (0..depth)
            .into_par_iter()
            .map(|i| index_to_point(i as u32))
            .collect();

        // The blinding generator uses a domain-separated, index-free
        // transcript so it cannot coincide with any g[i].
        let h = {
            let mut hasher = Blake2b512::new();
            hasher.update(b"srs_misc");
            hasher.update(0_u32.to_be_bytes());
            point_of_random_bytes(&group_map, &hasher.finalize())
        };

        Self {
            g,
            h,
            lagrange_bases: HashMapCache::new(),
        }
    }
}
591
impl<G> SRSTrait<G> for SRS<G>
where
    G: CommitmentCurve,
{
    /// Maximum number of coefficients a single commitment chunk covers
    /// (the SRS length).
    fn max_poly_size(&self) -> usize {
        self.g.len()
    }

    /// The dedicated generator used for blinding commitments.
    fn blinding_commitment(&self) -> G {
        self.h
    }

    /// Blinds each chunk of `com` with the matching caller-supplied
    /// blinder: `chunk + blinder * h`.
    ///
    /// # Errors
    /// Returns `CommitmentError::BlindersDontMatch` when `blinders` has a
    /// different number of chunks than `com`.
    fn mask_custom(
        &self,
        com: PolyComm<G>,
        blinders: &PolyComm<G::ScalarField>,
    ) -> Result<BlindedCommitment<G>, CommitmentError> {
        let commitment = com
            .zip(blinders)
            .ok_or_else(|| CommitmentError::BlindersDontMatch(blinders.len(), com.len()))?
            .map(|(g, b)| {
                let mut g_masked = self.h.mul(b);
                g_masked.add_assign(&g);
                g_masked.into_affine()
            });
        Ok(BlindedCommitment {
            commitment,
            blinders: blinders.clone(),
        })
    }

    /// Blinds `comm` with fresh random blinders drawn from `rng`.
    #[cfg(feature = "std")]
    fn mask(
        &self,
        comm: PolyComm<G>,
        rng: &mut (impl RngCore + CryptoRng),
    ) -> BlindedCommitment<G> {
        let blinders = comm.map(|_| G::ScalarField::rand(rng));
        // Freshly generated blinders always match the commitment shape, so
        // this unwrap cannot fail.
        self.mask_custom(comm, &blinders).unwrap()
    }

    /// Non-hiding commitment to `plnm`, split into chunks of at most
    /// `self.g.len()` coefficients and padded with identity points up to
    /// `num_chunks` chunks.
    ///
    /// NOTE(review): this method uses rayon under `std` alone (without the
    /// `parallel` feature gate used elsewhere) — presumably `std` implies
    /// `parallel` in this crate's feature tree; confirm.
    #[cfg(feature = "std")]
    fn commit_non_hiding(
        &self,
        plnm: &DensePolynomial<G::ScalarField>,
        num_chunks: usize,
    ) -> PolyComm<G> {
        let is_zero = plnm.is_zero();

        let mut chunks: Vec<_> = if is_zero {
            vec![G::zero()]
        } else if plnm.len() < self.g.len() {
            // Fits in one chunk: single MSM over a prefix of the SRS.
            vec![G::Group::msm(&self.g[..plnm.len()], &plnm.coeffs)
                .unwrap()
                .into_affine()]
        } else if plnm.len() == self.g.len() {
            // Exact fit: split the MSM in half and run both in parallel.
            let n = self.g.len();
            let (r1, r2) = rayon::join(
                || G::Group::msm(&self.g[..n / 2], &plnm.coeffs[..n / 2]).unwrap(),
                || G::Group::msm(&self.g[n / 2..n], &plnm.coeffs[n / 2..n]).unwrap(),
            );

            vec![(r1 + r2).into_affine()]
        } else {
            // Longer than the SRS: one commitment chunk per SRS-sized
            // window of coefficients.
            plnm.into_par_iter()
                .chunks(self.g.len())
                .map(|chunk| {
                    let chunk_coeffs = chunk
                        .into_iter()
                        .map(|c| c.into_bigint())
                        .collect::<Vec<_>>();
                    let chunk_res = G::Group::msm_bigint(&self.g, &chunk_coeffs);
                    chunk_res.into_affine()
                })
                .collect()
        };

        // Pad with identities so the result always has `num_chunks` chunks.
        for _ in chunks.len()..num_chunks {
            chunks.push(G::zero());
        }

        PolyComm::<G>::new(chunks)
    }

    /// Hiding commitment with fresh random blinders.
    #[cfg(feature = "std")]
    fn commit(
        &self,
        plnm: &DensePolynomial<G::ScalarField>,
        num_chunks: usize,
        rng: &mut (impl RngCore + CryptoRng),
    ) -> BlindedCommitment<G> {
        self.mask(self.commit_non_hiding(plnm, num_chunks), rng)
    }

    /// Hiding commitment with caller-supplied blinders.
    ///
    /// # Errors
    /// Same as `mask_custom`: blinders must match the commitment shape.
    #[cfg(feature = "std")]
    fn commit_custom(
        &self,
        plnm: &DensePolynomial<G::ScalarField>,
        num_chunks: usize,
        blinders: &PolyComm<G::ScalarField>,
    ) -> Result<BlindedCommitment<G>, CommitmentError> {
        self.mask_custom(self.commit_non_hiding(plnm, num_chunks), blinders)
    }

    /// Non-hiding commitment to a polynomial given in evaluation form,
    /// using the cached Lagrange-basis commitments for `domain`.
    ///
    /// # Panics
    /// Panics when `domain` is larger than the evaluations' own domain.
    #[cfg(feature = "std")]
    fn commit_evaluations_non_hiding(
        &self,
        domain: D<G::ScalarField>,
        plnm: &Evaluations<G::ScalarField, D<G::ScalarField>>,
    ) -> PolyComm<G> {
        let basis = &*self.get_lagrange_basis(domain);
        let commit_evaluations = |evals: &Vec<G::ScalarField>, basis: &Vec<PolyComm<G>>| {
            let basis_refs: Vec<_> = basis.iter().collect();
            PolyComm::<G>::multi_scalar_mul(&basis_refs, evals)
        };
        match domain.size.cmp(&plnm.domain().size) {
            core::cmp::Ordering::Less => {
                // Evaluations live on a strictly larger domain: subsample
                // every s-th evaluation to restrict to `domain`.
                #[allow(clippy::cast_possible_truncation)]
                let s = (plnm.domain().size / domain.size) as usize;
                let v: Vec<_> = (0..(domain.size())).map(|i| plnm.evals[s * i]).collect();
                commit_evaluations(&v, basis)
            }
            core::cmp::Ordering::Equal => commit_evaluations(&plnm.evals, basis),
            core::cmp::Ordering::Greater => {
                panic!("desired commitment domain size ({}) greater than evaluations' domain size ({}):", domain.size, plnm.domain().size)
            }
        }
    }

    /// Hiding variant of `commit_evaluations_non_hiding` with fresh random
    /// blinders.
    #[cfg(feature = "std")]
    fn commit_evaluations(
        &self,
        domain: D<G::ScalarField>,
        plnm: &Evaluations<G::ScalarField, D<G::ScalarField>>,
        rng: &mut (impl RngCore + CryptoRng),
    ) -> BlindedCommitment<G> {
        self.mask(self.commit_evaluations_non_hiding(domain, plnm), rng)
    }

    /// Hiding variant of `commit_evaluations_non_hiding` with
    /// caller-supplied blinders.
    ///
    /// # Errors
    /// Same as `mask_custom`: blinders must match the commitment shape.
    #[cfg(feature = "std")]
    fn commit_evaluations_custom(
        &self,
        domain: D<G::ScalarField>,
        plnm: &Evaluations<G::ScalarField, D<G::ScalarField>>,
        blinders: &PolyComm<G::ScalarField>,
    ) -> Result<BlindedCommitment<G>, CommitmentError> {
        self.mask_custom(self.commit_evaluations_non_hiding(domain, plnm), blinders)
    }

    /// Creates an SRS of size `depth` by hashing each index to a curve
    /// point (sequential; see `create_parallel` for the rayon variant).
    #[cfg(feature = "std")]
    fn create(depth: usize) -> Self {
        let m = G::Map::setup();

        let g: Vec<_> = (0..depth)
            .map(|i| {
                let mut h = Blake2b512::new();
                #[allow(clippy::cast_possible_truncation)]
                h.update((i as u32).to_be_bytes());
                point_of_random_bytes(&m, &h.finalize())
            })
            .collect();

        // Blinding generator from a domain-separated transcript, distinct
        // from every g[i].
        let h = {
            let mut h = Blake2b512::new();
            h.update(b"srs_misc");
            h.update(0_u32.to_be_bytes());
            point_of_random_bytes(&m, &h.finalize())
        };

        Self {
            g,
            h,
            lagrange_bases: HashMapCache::new(),
        }
    }

    /// Returns (computing and caching on first use) the Lagrange-basis
    /// commitments for a domain of the given size.
    fn get_lagrange_basis_from_domain_size(
        &self,
        domain_size: usize,
    ) -> impl Deref<Target = Vec<PolyComm<G>>> + '_ {
        self.lagrange_bases.get_or_generate(domain_size, move || {
            self.lagrange_basis(D::new(domain_size).unwrap())
        })
    }

    /// Returns (computing and caching on first use) the Lagrange-basis
    /// commitments for `domain`.
    fn get_lagrange_basis(
        &self,
        domain: D<G::ScalarField>,
    ) -> impl Deref<Target = Vec<PolyComm<G>>> + '_ {
        self.lagrange_bases
            .get_or_generate(domain.size(), move || self.lagrange_basis(domain))
    }

    /// Number of generators, i.e. the maximum supported polynomial length.
    #[cfg(feature = "std")]
    fn size(&self) -> usize {
        self.g.len()
    }
}
802
#[cfg(feature = "std")]
impl<G: CommitmentCurve> SRS<G> {
    /// The IPA prover: proves that the polynomials in `plnms`, combined
    /// with powers of `polyscale`, evaluate at the points `elm` (weighted
    /// by powers of `evalscale`) to the claimed combined inner product.
    ///
    /// Runs log2(srs size) halving rounds — each commits to cross terms
    /// L/R and folds the coefficient vector `a`, the evaluation vector
    /// `b`, and the generator vector `g` with a sponge-derived challenge —
    /// then closes with a Schnorr-style argument for the final scalar.
    #[allow(clippy::type_complexity)]
    #[allow(clippy::many_single_char_names)]
    #[allow(clippy::too_many_lines)]
    pub fn open<EFqSponge, RNG, D: EvaluationDomain<G::ScalarField>, const FULL_ROUNDS: usize>(
        &self,
        group_map: &G::Map,
        plnms: PolynomialsToCombine<G, D>,
        elm: &[G::ScalarField],
        polyscale: G::ScalarField,
        evalscale: G::ScalarField,
        mut sponge: EFqSponge,
        rng: &mut RNG,
    ) -> OpeningProof<G, FULL_ROUNDS>
    where
        EFqSponge: Clone + FqSponge<G::BaseField, G, G::ScalarField, FULL_ROUNDS>,
        RNG: RngCore + CryptoRng,
        G::BaseField: PrimeField,
        G: EndoCurve,
    {
        let (endo_q, endo_r) = endos::<G>();

        let rounds = math::ceil_log2(self.g.len());
        let padded_length = 1 << rounds;

        // Pad the generators to a power of two with identity points.
        let padding = padded_length - self.g.len();
        let mut g = self.g.clone();
        g.extend(vec![G::zero(); padding]);

        // Combine all polynomials into one using powers of polyscale; the
        // returned scalar combines the per-polynomial blinding factors.
        let (p, blinding_factor) = combine_polys::<G, D>(plnms, polyscale, self.g.len());

        // b_init[i] = sum_j evalscale^j * elm[j]^i — the vector the inner
        // product is taken against.
        let b_init = {
            let mut scale = G::ScalarField::one();
            let mut res: Vec<G::ScalarField> =
                (0..padded_length).map(|_| G::ScalarField::zero()).collect();
            for e in elm {
                for (i, t) in pows(padded_length, *e).iter().enumerate() {
                    res[i] += &(scale * t);
                }
                scale *= &evalscale;
            }
            res
        };

        // <p, b_init>: the combined claimed evaluation.
        let combined_inner_product = p
            .coeffs
            .iter()
            .zip(b_init.iter())
            .map(|(a, b)| *a * b)
            .fold(G::ScalarField::zero(), |acc, x| acc + x);

        sponge.absorb_fr(&[shift_scalar::<G>(combined_inner_product)]);

        // Auxiliary generator U, derived from the transcript.
        let u_base: G = {
            let t = sponge.challenge_fq();
            let (x, y) = group_map.to_group(t);
            G::of_coordinates(x, y)
        };

        // Pad the coefficient vector to the power-of-two length.
        let mut a = p.coeffs;
        assert!(padded_length >= a.len());
        a.extend(vec![G::ScalarField::zero(); padded_length - a.len()]);

        let mut b = b_init;

        let mut lr = vec![];

        let mut blinders = vec![];

        let mut chals = vec![];
        let mut chal_invs = vec![];

        // Halving rounds: each folds (g, a, b) to half their length.
        for _ in 0..rounds {
            let n = g.len() / 2;
            let (g_lo, g_hi) = (&g[0..n], &g[n..]);
            let (a_lo, a_hi) = (&a[0..n], &a[n..]);
            let (b_lo, b_hi) = (&b[0..n], &b[n..]);

            // Fresh blinders for the L/R cross-term commitments.
            let rand_l = <G::ScalarField as UniformRand>::rand(rng);
            let rand_r = <G::ScalarField as UniformRand>::rand(rng);

            // L = <a_hi, g_lo> + rand_l * h + <a_hi, b_lo> * U
            let l = G::Group::msm_bigint(
                &[g_lo, &[self.h, u_base]].concat(),
                &[a_hi, &[rand_l, inner_prod(a_hi, b_lo)]]
                    .concat()
                    .iter()
                    .map(|x| x.into_bigint())
                    .collect::<Vec<_>>(),
            )
            .into_affine();

            // R = <a_lo, g_hi> + rand_r * h + <a_lo, b_hi> * U
            let r = G::Group::msm_bigint(
                &[g_hi, &[self.h, u_base]].concat(),
                &[a_lo, &[rand_r, inner_prod(a_lo, b_hi)]]
                    .concat()
                    .iter()
                    .map(|x| x.into_bigint())
                    .collect::<Vec<_>>(),
            )
            .into_affine();

            lr.push((l, r));
            blinders.push((rand_l, rand_r));

            sponge.absorb_g(&[l]);
            sponge.absorb_g(&[r]);

            // Round challenge u (endo-mapped from the prechallenge) and
            // its inverse.
            let u_pre = squeeze_prechallenge(&mut sponge);
            let u = u_pre.to_field(&endo_r);
            let u_inv = u.inverse().unwrap();

            chals.push(u);
            chal_invs.push(u_inv);

            // a <- a_hi * u^-1 + a_lo
            a = o1_utils::cfg_iter!(a_hi)
                .zip(a_lo)
                .map(|(&hi, &lo)| {
                    let mut res = hi;
                    res *= u_inv;
                    res += &lo;
                    res
                })
                .collect();

            // b <- b_hi * u + b_lo
            b = o1_utils::cfg_iter!(b_lo)
                .zip(b_hi)
                .map(|(&lo, &hi)| {
                    let mut res = hi;
                    res *= u;
                    res += &lo;
                    res
                })
                .collect();

            // g <- g_lo + u * g_hi, via the endomorphism optimization.
            g = G::combine_one_endo(endo_r, endo_q, g_lo, g_hi, &u_pre);
        }

        assert!(
            g.len() == 1 && a.len() == 1 && b.len() == 1,
            "IPA commitment folding must produce single elements after log rounds"
        );
        let a0 = a[0];
        let b0 = b[0];
        let g0 = g[0];

        // Fold the round blinders into the combined blinding factor:
        // r' = blinding_factor + sum_i (rand_l_i * u_i^-1 + rand_r_i * u_i).
        let r_prime = blinders
            .iter()
            .zip(chals.iter().zip(chal_invs.iter()))
            .map(|((rand_l, rand_r), (u, u_inv))| ((*rand_l) * u_inv) + (*rand_r * u))
            .fold(blinding_factor, |acc, x| acc + x);

        // Schnorr-style proof of knowledge of a0 and r'.
        let d = <G::ScalarField as UniformRand>::rand(rng);
        let r_delta = <G::ScalarField as UniformRand>::rand(rng);

        // delta = d * (g0 + b0 * U) + r_delta * h
        let delta = ((g0.into_group() + (u_base.mul(b0))).into_affine().mul(d)
            + self.h.mul(r_delta))
        .into_affine();

        sponge.absorb_g(&[delta]);
        let c = ScalarChallenge::new(sponge.challenge()).to_field(&endo_r);

        // Responses: z1 binds a0, z2 binds the blinding factor r'.
        let z1 = a0 * c + d;
        let z2 = r_prime * c + r_delta;

        OpeningProof {
            delta,
            lr,
            z1,
            z2,
            sg: g0,
        }
    }
}
1063
impl<G: CommitmentCurve> SRS<G> {
    /// Computes commitments to all `n` Lagrange basis polynomials of
    /// `domain`. When `n` exceeds the SRS size, each basis commitment is
    /// produced in `ceil(n / srs_size)` chunks.
    fn lagrange_basis(&self, domain: D<G::ScalarField>) -> Vec<PolyComm<G>> {
        let n = domain.size();

        let srs_size = self.g.len();
        let num_elems = n.div_ceil(srs_size);
        let mut chunks = Vec::with_capacity(num_elems);

        for i in 0..num_elems {
            // Place the i-th SRS-sized window of generators into an
            // evaluation vector over the domain (identity elsewhere)...
            let mut lg: Vec<<G as AffineRepr>::Group> = vec![<G as AffineRepr>::Group::zero(); n];
            let start_offset = i * srs_size;
            let num_terms = min((i + 1) * srs_size, n) - start_offset;
            for j in 0..num_terms {
                lg[start_offset + j] = self.g[j].into_group();
            }
            // ...then an iFFT in the group gives, at position k, the i-th
            // chunk of the commitment to the k-th Lagrange polynomial.
            domain.ifft_in_place(&mut lg);
            // Convert the whole chunk to affine in one batch inversion.
            chunks.push(<G as AffineRepr>::Group::normalize_batch(lg.as_mut_slice()));
        }

        // Transpose: gather the per-chunk column k into one PolyComm.
        (0..n)
            .map(|i| PolyComm {
                chunks: chunks.iter().map(|v| v[i]).collect(),
            })
            .collect()
    }
}
1174
/// A non-interactive IPA opening proof.
#[serde_as]
#[derive(Clone, Debug, Serialize, Deserialize, Default, PartialEq, Eq)]
#[serde(bound = "G: ark_serialize::CanonicalDeserialize + ark_serialize::CanonicalSerialize")]
pub struct OpeningProof<G: AffineRepr, const FULL_ROUNDS: usize> {
    /// The (L, R) cross-term commitments, one pair per halving round.
    #[serde_as(as = "Vec<(o1_utils::serialization::SerdeAs, o1_utils::serialization::SerdeAs)>")]
    pub lr: Vec<(G, G)>,
    /// Commitment of the final Schnorr-style sub-argument.
    #[serde_as(as = "o1_utils::serialization::SerdeAs")]
    pub delta: G,
    /// Schnorr response binding the fully folded coefficient.
    #[serde_as(as = "o1_utils::serialization::SerdeAs")]
    pub z1: G::ScalarField,
    /// Schnorr response binding the combined blinding factor.
    #[serde_as(as = "o1_utils::serialization::SerdeAs")]
    pub z2: G::ScalarField,
    /// The fully folded SRS generator.
    #[serde_as(as = "o1_utils::serialization::SerdeAs")]
    pub sg: G,
}
1192
/// Wires `OpeningProof` into the generic `OpenProof` interface by
/// delegating to the inherent `SRS::open` / `SRS::verify` methods.
impl<
        BaseField: PrimeField,
        G: AffineRepr<BaseField = BaseField> + CommitmentCurve + EndoCurve,
        const FULL_ROUNDS: usize,
    > crate::OpenProof<G, FULL_ROUNDS> for OpeningProof<G, FULL_ROUNDS>
{
    type SRS = SRS<G>;

    /// Delegates to [`SRS::open`]; see that method for the protocol.
    #[cfg(feature = "std")]
    fn open<EFqSponge, RNG, D: EvaluationDomain<<G as AffineRepr>::ScalarField>>(
        srs: &Self::SRS,
        group_map: &<G as CommitmentCurve>::Map,
        plnms: PolynomialsToCombine<G, D>,
        elm: &[<G as AffineRepr>::ScalarField],
        polyscale: <G as AffineRepr>::ScalarField,
        evalscale: <G as AffineRepr>::ScalarField,
        sponge: EFqSponge,
        rng: &mut RNG,
    ) -> Self
    where
        EFqSponge: Clone
            + FqSponge<<G as AffineRepr>::BaseField, G, <G as AffineRepr>::ScalarField, FULL_ROUNDS>,
        RNG: RngCore + CryptoRng,
    {
        srs.open(group_map, plnms, elm, polyscale, evalscale, sponge, rng)
    }

    /// Delegates to [`SRS::verify`]: batch verification via one MSM.
    fn verify<EFqSponge, RNG>(
        srs: &Self::SRS,
        group_map: &G::Map,
        batch: &mut [BatchEvaluationProof<G, EFqSponge, Self, FULL_ROUNDS>],
        rng: &mut RNG,
    ) -> bool
    where
        EFqSponge:
            FqSponge<<G as AffineRepr>::BaseField, G, <G as AffineRepr>::ScalarField, FULL_ROUNDS>,
        RNG: RngCore + CryptoRng,
    {
        srs.verify(group_map, batch, rng)
    }
}
1234
/// The folding challenges of an IPA proof, together with their inverses.
pub struct Challenges<F> {
    /// One challenge per halving round.
    pub chal: Vec<F>,
    /// Element-wise inverses of `chal`.
    pub chal_inv: Vec<F>,
}
1240
1241impl<G: AffineRepr, const FULL_ROUNDS: usize> OpeningProof<G, FULL_ROUNDS> {
1242 pub fn prechallenges<EFqSponge: FqSponge<G::BaseField, G, G::ScalarField, FULL_ROUNDS>>(
1245 &self,
1246 sponge: &mut EFqSponge,
1247 ) -> Vec<ScalarChallenge<G::ScalarField>> {
1248 let _t = sponge.challenge_fq();
1249 self.lr
1250 .iter()
1251 .map(|(l, r)| {
1252 sponge.absorb_g(&[*l]);
1253 sponge.absorb_g(&[*r]);
1254 squeeze_prechallenge(sponge)
1255 })
1256 .collect()
1257 }
1258
1259 pub fn challenges<EFqSponge: FqSponge<G::BaseField, G, G::ScalarField, FULL_ROUNDS>>(
1262 &self,
1263 endo_r: &G::ScalarField,
1264 sponge: &mut EFqSponge,
1265 ) -> Challenges<G::ScalarField> {
1266 let chal: Vec<_> = self
1267 .lr
1268 .iter()
1269 .map(|(l, r)| {
1270 sponge.absorb_g(&[*l]);
1271 sponge.absorb_g(&[*r]);
1272 squeeze_challenge(endo_r, sponge)
1273 })
1274 .collect();
1275
1276 let chal_inv = {
1277 let mut cs = chal.clone();
1278 ark_ff::batch_inversion(&mut cs);
1279 cs
1280 };
1281
1282 Challenges { chal, chal_inv }
1283 }
1284}
1285
#[cfg(feature = "ocaml_types")]
#[allow(non_local_definitions)]
pub mod caml {
    //! OCaml FFI mirror of [`OpeningProof`], plus conversions in both
    //! directions.

    use super::OpeningProof;
    use ark_ec::AffineRepr;
    use ocaml;

    /// OCaml-visible twin of `OpeningProof`. Field order must match the
    /// generated OCaml record layout.
    #[derive(ocaml::IntoValue, ocaml::FromValue, ocaml_gen::Struct)]
    pub struct CamlOpeningProof<G, F> {
        pub lr: Vec<(G, G)>,
        pub delta: G,
        pub z1: F,
        pub z2: F,
        pub sg: G,
    }

    impl<G, CamlF, CamlG, const FULL_ROUNDS: usize> From<OpeningProof<G, FULL_ROUNDS>>
        for CamlOpeningProof<CamlG, CamlF>
    where
        G: AffineRepr,
        CamlG: From<G>,
        CamlF: From<G::ScalarField>,
    {
        /// Converts a Rust proof into its OCaml representation, mapping
        /// every group element and scalar through the Caml wrapper types.
        fn from(proof: OpeningProof<G, FULL_ROUNDS>) -> Self {
            let lr = proof
                .lr
                .into_iter()
                .map(|(left, right)| (CamlG::from(left), CamlG::from(right)))
                .collect();
            Self {
                lr,
                delta: CamlG::from(proof.delta),
                z1: CamlF::from(proof.z1),
                z2: CamlF::from(proof.z2),
                sg: CamlG::from(proof.sg),
            }
        }
    }

    impl<G, CamlF, CamlG, const FULL_ROUNDS: usize> From<CamlOpeningProof<CamlG, CamlF>>
        for OpeningProof<G, FULL_ROUNDS>
    where
        G: AffineRepr,
        CamlG: Into<G>,
        CamlF: Into<G::ScalarField>,
    {
        /// Converts an OCaml-side proof back into the native Rust type.
        fn from(value: CamlOpeningProof<CamlG, CamlF>) -> Self {
            let lr = value
                .lr
                .into_iter()
                .map(|(left, right)| (left.into(), right.into()))
                .collect();
            Self {
                lr,
                delta: value.delta.into(),
                z1: value.z1.into(),
                z2: value.z2.into(),
                sg: value.sg.into(),
            }
        }
    }
}