//! Lookup prover (o1vm/pickles/lookup_prover.rs).
use ark_ff::{One, PrimeField, Zero};
use ark_poly::{univariate::DensePolynomial, Evaluations, Polynomial, Radix2EvaluationDomain};
use kimchi::{
    circuits::{domains::EvaluationDomains, expr::l0_1},
    curve::KimchiCurve,
    groupmap::GroupMap,
    plonk_sponge::FrSponge,
};
use mina_poseidon::FqSponge;
use o1_utils::ExtendedDensePolynomial;
use poly_commitment::{
    commitment::absorb_commitment,
    ipa::{OpeningProof, SRS},
    utils::DensePolynomialOrEvaluations,
    OpenProof, PolyComm, SRS as _,
};
use rand::{CryptoRng, RngCore};

//TODO Parallelize
//use rayon::prelude::*;
use super::lookup_columns::{ELookup, LookupChallenges, LookupEvalEnvironment};
use crate::pickles::lookup_columns::*;

15/// This prover takes one Public Input and one Public Output
16/// It then proves that the sum 1/(beta + table) = PI - PO
17/// where the table term are term from fixed lookup or RAMLookup
18
19pub fn lookup_prove<
20    G: KimchiCurve,
21    EFqSponge: FqSponge<G::BaseField, G, G::ScalarField> + Clone,
22    EFrSponge: FrSponge<G::ScalarField>,
23    RNG,
24>(
25    input: LookupProofInput<G::ScalarField>,
26    acc_init: G::ScalarField,
27    srs: &SRS<G>,
28    domain: EvaluationDomains<G::ScalarField>,
29    mut fq_sponge: EFqSponge,
30    constraint: &ELookup<G::ScalarField>,
31    rng: &mut RNG,
32) -> (Proof<G>, G::ScalarField)
33where
34    G::BaseField: PrimeField,
35    RNG: RngCore + CryptoRng,
36{
37    // TODO check that
38    let num_chunk = 8;
39    let LookupProofInput {
40        wires,
41        arity,
42        beta_challenge,
43        gamma_challenge,
44    } = input;
45    // Compute how many inverse wires we need to define pad function accordingly
46    let nb_inv_wires = arity
47        .iter()
48        .max_by(|a, b| a.len().cmp(&b.len()))
49        .unwrap()
50        .len();
51    let pad = |mut vec: Vec<G::ScalarField>| {
52        vec.append(&mut vec![G::ScalarField::zero(); nb_inv_wires]);
53        vec
54    };
55
56    // Compute the 1/beta+sum_i gamma^i value_i for each lookup term
57    // The inversions is commputed in batch in the end
58    let mut inverses: Vec<Vec<G::ScalarField>> = wires
59        .iter()
60        .zip(arity)
61        .map(|(inner_vec, arity)| {
62            arity
63                .into_iter()
64                .map(|arity| {
65                    // TODO don't recompute gamma powers everytime
66                    let (res, _) = inner_vec.iter().take(arity).fold(
67                        (beta_challenge, G::ScalarField::one()),
68                        |(acc, gamma_i), x| (acc + gamma_i * x, gamma_i * gamma_challenge),
69                    );
70                    res
71                })
72                .collect()
73        })
74        .map(pad)
75        .collect();
76    // Perform the inversion
77    inverses
78        .iter_mut()
79        .for_each(|inner_vec| ark_ff::batch_inversion(inner_vec));
80    // Compute the accumulator
81    // Init at acc_init
82    let mut partial_sum = acc_init;
83    let mut acc = vec![];
84    for inner in inverses.iter_mut() {
85        for x in inner.iter_mut() {
86            partial_sum += *x;
87            acc.push(partial_sum)
88        }
89    }
90    let acc_final = acc[acc.len()];
91    let columns = ColumnEnv {
92        wires,
93        inverses,
94        acc,
95    };
96    // Interpolating
97    let interpolate_col = |evals: Vec<G::ScalarField>| {
98        Evaluations::<G::ScalarField, Radix2EvaluationDomain<G::ScalarField>>::from_vec_and_domain(
99            evals, domain.d1,
100        )
101        .interpolate()
102    };
103    let columns_poly = columns.my_map(interpolate_col);
104    // Commiting
105    // TODO avoid cloning
106    let columns_com = columns_poly
107        .clone()
108        .my_map(|poly| srs.commit_non_hiding(&poly, 1).chunks[0]);
109
110    // eval on d8
111    // TODO: check the degree
112    // TODO: avoid cloning
113    let columns_eval_d8 = columns_poly
114        .clone()
115        .my_map(|poly| poly.evaluate_over_domain_by_ref(domain.d8));
116    // abosrbing commit
117    // TODO don't absorb the wires which already have been
118    // TODO avoid cloning
119    columns_com
120        .clone()
121        .into_iter()
122        .for_each(|com| absorb_commitment(&mut fq_sponge, &PolyComm { chunks: vec![com] }));
123
124    // Constraints combiner
125    let alpha: G::ScalarField = fq_sponge.challenge();
126
127    let challenges = LookupChallenges {
128        alpha,
129        beta: beta_challenge,
130        gamma: gamma_challenge,
131    };
132    let eval_env = LookupEvalEnvironment {
133        challenges,
134        columns: &columns_eval_d8,
135        domain: &domain,
136        l0_1: l0_1(domain.d1),
137    };
138    let t_numerator_evaluation: Evaluations<
139        G::ScalarField,
140        Radix2EvaluationDomain<G::ScalarField>,
141    > = constraint.evaluations(&eval_env);
142    let t_numerator_poly = t_numerator_evaluation.interpolate();
143    let (t, rem) = t_numerator_poly.divide_by_vanishing_poly(domain.d1);
144    assert!(!rem.is_zero());
145    let t_commitment = srs.commit_non_hiding(
146        &t, 8, //TODO: check the degree,
147    );
148    // TODO avoid cloning
149    let commitments = AllColumns {
150        cols: columns_com,
151        t_shares: t_commitment.chunks.clone(),
152    };
153    // Absorb t
154    absorb_commitment(&mut fq_sponge, &t_commitment);
155    // evaluate and prepare for IPA proof
156    // TODO check num_chunks and srs length
157    let t_chunks = t.to_chunked_polynomial(num_chunk, srs.size());
158    let zeta = fq_sponge.challenge();
159    let zeta_omega = zeta * domain.d1.group_gen;
160    let eval =
161        |x,
162         cols_poly: ColumnEnv<DensePolynomial<_>>,
163         t_chunks: o1_utils::chunked_polynomial::ChunkedPolynomial<_>| AllColumns {
164            cols: cols_poly.my_map(|poly| poly.evaluate(&x)),
165            t_shares: t_chunks
166                .polys
167                .into_iter()
168                .map(|poly| poly.evaluate(&x))
169                .collect(),
170        };
171    // TODO avoid cloning
172    let evaluations = Eval {
173        zeta: eval(zeta, columns_poly.clone(), t_chunks.clone()),
174        zeta_omega: eval(zeta_omega, columns_poly.clone(), t_chunks.clone()),
175    };
176    let fq_sponge_before_evaluations = fq_sponge.clone();
177    // Creating fr_sponge, absorbing eval to create challenges for IPA
178    let mut fr_sponge = EFrSponge::new(G::sponge_params());
179    fr_sponge.absorb(&fq_sponge.digest());
180    // TODO avoid cloning
181    evaluations
182        .clone()
183        .into_iter()
184        .for_each(|x| fr_sponge.absorb(&x));
185    let (_, endo_r) = G::endos();
186    // poly scale
187    let poly_scale_chal = fr_sponge.challenge();
188    let poly_scale = poly_scale_chal.to_field(endo_r);
189    // eval scale
190    let eval_scale_chal = fr_sponge.challenge();
191    let eval_scale = eval_scale_chal.to_field(endo_r);
192    let group_map = G::Map::setup();
193    // prepare polynomials for IPA proof
194    let all_columns_poly = AllColumns {
195        cols: columns_poly,
196        t_shares: t_chunks.polys,
197    };
198    let polynomials: Vec<_> = all_columns_poly.into_iter().collect();
199    let polynomials : Vec<_> = polynomials.iter().map(|poly| {
200        (
201            DensePolynomialOrEvaluations::<_,Radix2EvaluationDomain<G::ScalarField>>::DensePolynomial(poly),
202            // We do not have any blinder, therefore we set to 1.
203            PolyComm::new(vec![G::ScalarField::one()]),
204        )
205    }).collect();
206    let ipa_proof = OpeningProof::open(
207        srs,
208        &group_map,
209        polynomials.as_slice(),
210        &[zeta, zeta_omega],
211        poly_scale,
212        eval_scale,
213        fq_sponge_before_evaluations,
214        rng,
215    );
216    (
217        Proof {
218            commitments,
219            evaluations,
220            ipa_proof,
221        },
222        acc_final,
223    )
224}