o1vm/pickles/lookup_prover.rs

use ark_ff::{One, PrimeField, Zero};
use ark_poly::{univariate::DensePolynomial, Evaluations, Polynomial, Radix2EvaluationDomain};
use kimchi::{circuits::domains::EvaluationDomains, curve::KimchiCurve, plonk_sponge::FrSponge};
use mina_poseidon::FqSponge;
use o1_utils::ExtendedDensePolynomial;
use poly_commitment::{commitment::absorb_commitment, ipa::SRS, OpenProof, SRS as _};
// TODO: parallelize
//use rayon::prelude::*;
use super::lookup_columns::{ELookup, LookupChallenges, LookupEvalEnvironment};
use crate::pickles::lookup_columns::*;
use kimchi::{circuits::expr::l0_1, groupmap::GroupMap};
use poly_commitment::{ipa::OpeningProof, utils::DensePolynomialOrEvaluations, PolyComm};
use rand::{CryptoRng, RngCore};

/// This prover takes one Public Input (PI) and one Public Output (PO).
/// It then proves that sum 1/(beta + table) = PI - PO,
/// where the table terms come from fixed lookups or RAMLookup.
pub fn lookup_prove<
    G: KimchiCurve,
    EFqSponge: FqSponge<G::BaseField, G, G::ScalarField> + Clone,
    EFrSponge: FrSponge<G::ScalarField>,
    RNG,
>(
    input: LookupProofInput<G::ScalarField>,
    acc_init: G::ScalarField,
    srs: &SRS<G>,
    domain: EvaluationDomains<G::ScalarField>,
    mut fq_sponge: EFqSponge,
    constraint: &ELookup<G::ScalarField>,
    rng: &mut RNG,
) -> (Proof<G>, G::ScalarField)
where
    G::BaseField: PrimeField,
    RNG: RngCore + CryptoRng,
{
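    // Protocol outline:
    // 1. compute the inverse and accumulator columns, and commit to all columns;
    // 2. sample alpha, combine the constraints and commit to the quotient t;
    // 3. sample zeta and evaluate every polynomial at zeta and zeta * omega;
    // 4. open all the polynomials at both points with a single IPA proof.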
    // TODO: check that this is the right number of chunks
    let num_chunk = 8;
    let LookupProofInput {
        wires,
        arity,
        beta_challenge,
        gamma_challenge,
    } = input;
    // Compute how many inverse wires we need, so the pad function can be
    // defined accordingly
    let nb_inv_wires = arity
        .iter()
        .max_by(|a, b| a.len().cmp(&b.len()))
        .unwrap()
        .len();
    // Pad each row of inverse values with zeroes up to nb_inv_wires entries
    let pad = |mut vec: Vec<G::ScalarField>| {
        vec.resize(nb_inv_wires, G::ScalarField::zero());
        vec
    };

    // Compute beta + sum_i gamma^i * value_i for each lookup term.
    // The inversions are performed in batch at the end.
    let mut inverses: Vec<Vec<G::ScalarField>> = wires
        .iter()
        .zip(arity)
        .map(|(inner_vec, arity)| {
            arity
                .into_iter()
                .map(|arity| {
                    // TODO: don't recompute the gamma powers every time
                    // Fold beta + gamma^0 * x_0 + gamma^1 * x_1 + ... over the
                    // first `arity` wire values
                    let (res, _) = inner_vec.iter().take(arity).fold(
                        (beta_challenge, G::ScalarField::one()),
                        |(acc, gamma_i), x| (acc + gamma_i * x, gamma_i * gamma_challenge),
                    );
                    res
                })
                .collect()
        })
        .map(pad)
        .collect();
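    // Batch inversion (Montgomery's trick) computes all the inverses with a
    // single field inversion and a linear number of multiplications.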
    // Perform the inversions
    inverses
        .iter_mut()
        .for_each(|inner_vec| ark_ff::batch_inversion(inner_vec));
    // Compute the accumulator: the running partial sums of the inverses,
    // initialized at acc_init
    let mut partial_sum = acc_init;
    let mut acc = vec![];
    for inner in inverses.iter() {
        for x in inner.iter() {
            partial_sum += *x;
            acc.push(partial_sum)
        }
    }
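    // The last accumulator entry is the claimed final value: by construction,
    // acc_final - acc_init equals the sum of all the inverse terms.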
    let acc_final = acc[acc.len() - 1];
    let columns = ColumnEnv {
        wires,
        inverses,
        acc,
    };
    // Interpolating
    let interpolate_col = |evals: Vec<G::ScalarField>| {
        Evaluations::<G::ScalarField, Radix2EvaluationDomain<G::ScalarField>>::from_vec_and_domain(
            evals, domain.d1,
        )
        .interpolate()
    };
    let columns_poly = columns.my_map(interpolate_col);
    // Commit to the columns
    // TODO: avoid cloning
    let columns_com = columns_poly
        .clone()
        .my_map(|poly| srs.commit_non_hiding(&poly, 1).chunks[0]);

    // Evaluate on d8
    // TODO: check the degree
    // TODO: avoid cloning
    let columns_eval_d8 = columns_poly
        .clone()
        .my_map(|poly| poly.evaluate_over_domain_by_ref(domain.d8));
    // Absorb the commitments
    // TODO: don't absorb the wires which have already been absorbed
    // TODO: avoid cloning
    columns_com
        .clone()
        .into_iter()
        .for_each(|com| absorb_commitment(&mut fq_sponge, &PolyComm { chunks: vec![com] }));
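
    // Fiat-Shamir: the constraint combiner alpha must be sampled only after
    // all the column commitments have been absorbed into the transcript.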
    // Constraints combiner
    let alpha: G::ScalarField = fq_sponge.challenge();

    let challenges = LookupChallenges {
        alpha,
        beta: beta_challenge,
        gamma: gamma_challenge,
    };
    let eval_env = LookupEvalEnvironment {
        challenges,
        columns: &columns_eval_d8,
        domain: &domain,
        l0_1: l0_1(domain.d1),
    };
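    // The combined constraint expression is evaluated on d8, interpolated, and
    // divided by the vanishing polynomial of d1. The division is exact (zero
    // remainder) exactly when all the constraints hold on d1.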
    let t_numerator_evaluation: Evaluations<
        G::ScalarField,
        Radix2EvaluationDomain<G::ScalarField>,
    > = constraint.evaluations(&eval_env);
    let t_numerator_poly = t_numerator_evaluation.interpolate();
    let (t, rem) = t_numerator_poly.divide_by_vanishing_poly(domain.d1);
    assert!(rem.is_zero());
    // TODO: check the degree
    let t_commitment = srs.commit_non_hiding(&t, 8);
    // TODO: avoid cloning
    let commitments = AllColumns {
        cols: columns_com,
        t_shares: t_commitment.chunks.clone(),
    };
    // Absorb t
    absorb_commitment(&mut fq_sponge, &t_commitment);
    // Evaluate and prepare for the IPA proof
    // TODO: check num_chunk and the SRS length
    let t_chunks = t.to_chunked_polynomial(num_chunk, srs.size());
    let zeta = fq_sponge.challenge();
    let zeta_omega = zeta * domain.d1.group_gen;
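    // Evaluations are needed at both zeta and zeta * omega because the
    // accumulator constraint relates each row to the next one.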
    let eval =
        |x,
         cols_poly: ColumnEnv<DensePolynomial<_>>,
         t_chunks: o1_utils::chunked_polynomial::ChunkedPolynomial<_>| AllColumns {
            cols: cols_poly.my_map(|poly| poly.evaluate(&x)),
            t_shares: t_chunks
                .polys
                .into_iter()
                .map(|poly| poly.evaluate(&x))
                .collect(),
        };
    // TODO: avoid cloning
    let evaluations = Eval {
        zeta: eval(zeta, columns_poly.clone(), t_chunks.clone()),
        zeta_omega: eval(zeta_omega, columns_poly.clone(), t_chunks.clone()),
    };
    let fq_sponge_before_evaluations = fq_sponge.clone();
    // Create the fr_sponge and absorb the evaluations to derive the IPA
    // challenges
    let mut fr_sponge = EFrSponge::new(G::sponge_params());
    fr_sponge.absorb(&fq_sponge.digest());
    // TODO: avoid cloning
    evaluations
        .clone()
        .into_iter()
        .for_each(|x| fr_sponge.absorb(&x));
    let (_, endo_r) = G::endos();
    // Challenge to combine the polynomials
    let poly_scale_chal = fr_sponge.challenge();
    let poly_scale = poly_scale_chal.to_field(endo_r);
    // Challenge to combine the evaluation points
    let eval_scale_chal = fr_sponge.challenge();
    let eval_scale = eval_scale_chal.to_field(endo_r);
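    // A single IPA opening covers all the polynomials at both evaluation
    // points: the polynomials are combined with powers of poly_scale and the
    // evaluation points with eval_scale.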
    let group_map = G::Map::setup();
    // Prepare the polynomials for the IPA proof
    let all_columns_poly = AllColumns {
        cols: columns_poly,
        t_shares: t_chunks.polys,
    };
    let polynomials: Vec<_> = all_columns_poly.into_iter().collect();
    let polynomials: Vec<_> = polynomials
        .iter()
        .map(|poly| {
            (
                DensePolynomialOrEvaluations::<_, Radix2EvaluationDomain<G::ScalarField>>::DensePolynomial(poly),
                // We do not have any blinder, therefore we set it to 1.
                PolyComm::new(vec![G::ScalarField::one()]),
            )
        })
        .collect();
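    // The opening proof is generated from the sponge state saved before the
    // evaluations were absorbed, so the verifier can replay the same
    // transcript.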
    let ipa_proof = OpeningProof::open(
        srs,
        &group_map,
        polynomials.as_slice(),
        &[zeta, zeta_omega],
        poly_scale,
        eval_scale,
        fq_sponge_before_evaluations,
        rng,
    );
    (
        Proof {
            commitments,
            evaluations,
            ipa_proof,
        },
        acc_final,
    )
}