//! Lookup prover (o1vm/pickles/lookup_prover.rs).

1use ark_ff::{One, PrimeField, Zero};
2use ark_poly::{univariate::DensePolynomial, Evaluations, Polynomial, Radix2EvaluationDomain};
3use kimchi::{circuits::domains::EvaluationDomains, curve::KimchiCurve, plonk_sponge::FrSponge};
4use mina_poseidon::{poseidon::ArithmeticSpongeParams, FqSponge};
5use o1_utils::ExtendedDensePolynomial;
6use poly_commitment::{commitment::absorb_commitment, ipa::SRS, OpenProof, SRS as _};
7//TODO Parallelize
8//use rayon::prelude::*;
9use super::lookup_columns::{ELookup, LookupChallenges, LookupEvalEnvironment};
10use crate::pickles::lookup_columns::*;
11use kimchi::{circuits::expr::l0_1, groupmap::GroupMap};
12use poly_commitment::{ipa::OpeningProof, utils::DensePolynomialOrEvaluations, PolyComm};
13use rand::{CryptoRng, RngCore};
14
15/// This prover takes one Public Input and one Public Output.
16/// It then proves that the sum 1/(beta + table) = PI - PO
17/// where the table term are term from fixed lookup or RAMLookup
18pub fn lookup_prove<const FULL_ROUNDS: usize, G, EFqSponge, EFrSponge, RNG>(
19    input: LookupProofInput<G::ScalarField>,
20    acc_init: G::ScalarField,
21    srs: &SRS<G>,
22    domain: EvaluationDomains<G::ScalarField>,
23    mut fq_sponge: EFqSponge,
24    constraint: &ELookup<G::ScalarField>,
25    rng: &mut RNG,
26) -> (Proof<FULL_ROUNDS, G>, G::ScalarField)
27where
28    G: KimchiCurve<FULL_ROUNDS>,
29    EFqSponge: FqSponge<G::BaseField, G, G::ScalarField, FULL_ROUNDS> + Clone,
30    EFrSponge: FrSponge<G::ScalarField>,
31    EFrSponge: From<&'static ArithmeticSpongeParams<G::ScalarField, FULL_ROUNDS>>,
32    G::BaseField: PrimeField,
33    RNG: RngCore + CryptoRng,
34{
35    // TODO check that
36    let num_chunk = 8;
37    let LookupProofInput {
38        wires,
39        arity,
40        beta_challenge,
41        gamma_challenge,
42    } = input;
43    // Compute how many inverse wires we need to define pad function accordingly
44    let nb_inv_wires = arity
45        .iter()
46        .max_by(|a, b| a.len().cmp(&b.len()))
47        .unwrap()
48        .len();
49    let pad = |mut vec: Vec<G::ScalarField>| {
50        vec.append(&mut vec![G::ScalarField::zero(); nb_inv_wires]);
51        vec
52    };
53
54    // Compute the 1/beta+sum_i gamma^i value_i for each lookup term
55    // The inversions is computed in batch in the end
56    let mut inverses: Vec<Vec<G::ScalarField>> = wires
57        .iter()
58        .zip(arity)
59        .map(|(inner_vec, arity)| {
60            arity
61                .into_iter()
62                .map(|arity| {
63                    // TODO don't recompute gamma powers everytime
64                    let (res, _) = inner_vec.iter().take(arity).fold(
65                        (beta_challenge, G::ScalarField::one()),
66                        |(acc, gamma_i), x| (acc + gamma_i * x, gamma_i * gamma_challenge),
67                    );
68                    res
69                })
70                .collect()
71        })
72        .map(pad)
73        .collect();
74    // Perform the inversion
75    inverses
76        .iter_mut()
77        .for_each(|inner_vec| ark_ff::batch_inversion(inner_vec));
78    // Compute the accumulator
79    // Init at acc_init
80    let mut partial_sum = acc_init;
81    let mut acc = vec![];
82    for inner in inverses.iter_mut() {
83        for x in inner.iter_mut() {
84            partial_sum += *x;
85            acc.push(partial_sum)
86        }
87    }
88    let acc_final = acc[acc.len()];
89    let columns = ColumnEnv {
90        wires,
91        inverses,
92        acc,
93    };
94    // Interpolating
95    let interpolate_col = |evals: Vec<G::ScalarField>| {
96        Evaluations::<G::ScalarField, Radix2EvaluationDomain<G::ScalarField>>::from_vec_and_domain(
97            evals, domain.d1,
98        )
99        .interpolate()
100    };
101    let columns_poly = columns.my_map(interpolate_col);
102    // Committing
103    // TODO avoid cloning
104    let columns_com = columns_poly
105        .clone()
106        .my_map(|poly| srs.commit_non_hiding(&poly, 1).chunks[0]);
107
108    // eval on d8
109    // TODO: check the degree
110    // TODO: avoid cloning
111    let columns_eval_d8 = columns_poly
112        .clone()
113        .my_map(|poly| poly.evaluate_over_domain_by_ref(domain.d8));
114    // absorbing commit
115    // TODO don't absorb the wires which already have been
116    // TODO avoid cloning
117    columns_com
118        .clone()
119        .into_iter()
120        .for_each(|com| absorb_commitment(&mut fq_sponge, &PolyComm { chunks: vec![com] }));
121
122    // Constraints combiner
123    let alpha: G::ScalarField = fq_sponge.challenge();
124
125    let challenges = LookupChallenges {
126        alpha,
127        beta: beta_challenge,
128        gamma: gamma_challenge,
129    };
130    let eval_env = LookupEvalEnvironment {
131        challenges,
132        columns: &columns_eval_d8,
133        domain: &domain,
134        l0_1: l0_1(domain.d1),
135    };
136    let t_numerator_evaluation: Evaluations<
137        G::ScalarField,
138        Radix2EvaluationDomain<G::ScalarField>,
139    > = constraint.evaluations(&eval_env);
140    let t_numerator_poly = t_numerator_evaluation.interpolate();
141    let (t, rem) = t_numerator_poly.divide_by_vanishing_poly(domain.d1);
142    assert!(!rem.is_zero());
143    let t_commitment = srs.commit_non_hiding(
144        &t, 8, //TODO: check the degree,
145    );
146    // TODO avoid cloning
147    let commitments = AllColumns {
148        cols: columns_com,
149        t_shares: t_commitment.chunks.clone(),
150    };
151    // Absorb t
152    absorb_commitment(&mut fq_sponge, &t_commitment);
153    // evaluate and prepare for IPA proof
154    // TODO check num_chunks and srs length
155    let t_chunks = t.to_chunked_polynomial(num_chunk, srs.size());
156    let zeta = fq_sponge.challenge();
157    let zeta_omega = zeta * domain.d1.group_gen;
158    let eval =
159        |x,
160         cols_poly: ColumnEnv<DensePolynomial<_>>,
161         t_chunks: o1_utils::chunked_polynomial::ChunkedPolynomial<_>| AllColumns {
162            cols: cols_poly.my_map(|poly| poly.evaluate(&x)),
163            t_shares: t_chunks
164                .polys
165                .into_iter()
166                .map(|poly| poly.evaluate(&x))
167                .collect(),
168        };
169    // TODO avoid cloning
170    let evaluations = Eval {
171        zeta: eval(zeta, columns_poly.clone(), t_chunks.clone()),
172        zeta_omega: eval(zeta_omega, columns_poly.clone(), t_chunks.clone()),
173    };
174    let fq_sponge_before_evaluations = fq_sponge.clone();
175    // Creating fr_sponge, absorbing eval to create challenges for IPA
176    let mut fr_sponge = EFrSponge::from(G::sponge_params());
177    fr_sponge.absorb(&fq_sponge.digest());
178    // TODO avoid cloning
179    evaluations
180        .clone()
181        .into_iter()
182        .for_each(|x| fr_sponge.absorb(&x));
183    let (_, endo_r) = G::endos();
184    // poly scale
185    let poly_scale_chal = fr_sponge.challenge();
186    let poly_scale = poly_scale_chal.to_field(endo_r);
187    // eval scale
188    let eval_scale_chal = fr_sponge.challenge();
189    let eval_scale = eval_scale_chal.to_field(endo_r);
190    let group_map = G::Map::setup();
191    // prepare polynomials for IPA proof
192    let all_columns_poly = AllColumns {
193        cols: columns_poly,
194        t_shares: t_chunks.polys,
195    };
196    let polynomials: Vec<_> = all_columns_poly.into_iter().collect();
197    let polynomials : Vec<_> = polynomials.iter().map(|poly| {
198        (
199            DensePolynomialOrEvaluations::<_,Radix2EvaluationDomain<G::ScalarField>>::DensePolynomial(poly),
200            // We do not have any blinder, therefore we set to 1.
201            PolyComm::new(vec![G::ScalarField::one()]),
202        )
203    }).collect();
204    let ipa_proof = OpeningProof::open(
205        srs,
206        &group_map,
207        polynomials.as_slice(),
208        &[zeta, zeta_omega],
209        poly_scale,
210        eval_scale,
211        fq_sponge_before_evaluations,
212        rng,
213    );
214    (
215        Proof {
216            commitments,
217            evaluations,
218            ipa_proof,
219        },
220        acc_final,
221    )
222}