// o1vm/pickles/lookup_verifier.rs

1use crate::pickles::lookup_columns::*;
2use ark_ff::{Field, One, PrimeField, Zero};
3use kimchi::{
4    circuits::{
5        domains::EvaluationDomains,
6        expr::{Constants, PolishToken},
7    },
8    curve::KimchiCurve,
9    groupmap::GroupMap,
10    plonk_sponge::FrSponge,
11};
12use mina_poseidon::{sponge::ScalarChallenge, FqSponge};
13use poly_commitment::{
14    commitment::{absorb_commitment, combined_inner_product, BatchEvaluationProof, Evaluation},
15    ipa::OpeningProof,
16    OpenProof, PolyComm,
17};
18use rand::thread_rng;
19
/// Verifies a lookup-argument proof.
///
/// The check proceeds in three phases:
/// 1. **Transcript replay** — absorb the column and quotient commitments
///    into the Fq-sponge and squeeze the same challenges (`alpha`, `zeta`)
///    the prover derived. The absorption order here must mirror the prover
///    exactly, or the Fiat-Shamir challenges diverge.
/// 2. **IPA verification** — absorb the claimed evaluations into an
///    Fr-sponge, squeeze the batching scalars (`polyscale`, `evalscale`),
///    and verify the batch opening proof at the two points `zeta` and
///    `zeta * omega`.
/// 3. **Quotient check** — evaluate the constraint expression at `zeta`
///    and check it equals `t(zeta) * (zeta^n - 1)`, where `t(zeta)` is
///    recombined from its chunked evaluations.
///
/// # Parameters
/// - `beta_challenge`, `gamma_challenge`: challenges inherited from the
///   main protocol this lookup argument is attached to.
/// - `constraint`: the lookup constraint expression; its evaluation at
///   `zeta` is the numerator of the quotient identity.
/// - `fq_sponge`: base-field sponge continuing the caller's transcript.
/// - `domain`: evaluation domains; only `d1` (its size and group
///   generator) is actually used here — see the TODO below.
/// - `srs`: SRS used to verify the IPA opening proof.
/// - `proof`: the commitments, the evaluations at `zeta` and
///   `zeta * omega`, and the IPA opening proof.
///
/// Returns `true` iff both the IPA proof and the quotient identity hold.
///
/// # Panics
/// Panics if the constraint expression fails to evaluate at `zeta`
/// (a malformed constraint, not a bad proof).
pub fn lookup_verify<
    G: KimchiCurve,
    EFqSponge: Clone + FqSponge<G::BaseField, G, G::ScalarField>,
    EFrSponge: FrSponge<G::ScalarField>,
>(
    // input dependant of main proto
    beta_challenge: G::ScalarField,
    gamma_challenge: G::ScalarField,
    constraint: ELookup<G::ScalarField>,
    mut fq_sponge: EFqSponge,
    // fixed input
    // TODO: we don't need the whole domain
    domain: EvaluationDomains<G::ScalarField>,
    srs: &<OpeningProof<G> as OpenProof<G>>::SRS,
    // proof
    proof: &Proof<G>,
) -> bool
where
    G::BaseField: PrimeField,
{
    let Proof {
        commitments,
        evaluations,
        ipa_proof,
    } = proof;

    ///////
    // Reproduce plonkish FS challenges
    //////

    // absorbing commit
    // TODO don't absorb the wires which already have been
    // when the same TODO is done in the prover
    // Each column commitment is absorbed as a single-chunk polynomial
    // commitment, in the order the columns are stored.
    commitments
        .clone()
        .cols
        .into_iter()
        .for_each(|com| absorb_commitment(&mut fq_sponge, &PolyComm { chunks: vec![com] }));

    // `endo_r` maps 128-bit scalar challenges into full field elements
    // via `ScalarChallenge::to_field` below.
    let (_, endo_r) = G::endos();

    // Sample α with the Fq-Sponge.
    let alpha = fq_sponge.challenge();

    // absorb quotient polynomial
    // TODO: avoid cloning
    absorb_commitment(
        &mut fq_sponge,
        &PolyComm {
            chunks: commitments.clone().t_shares,
        },
    );

    // squeeze zeta
    // TODO: understand why we use the endo here and for IPA ,
    // but not for alpha
    let zeta_chal = ScalarChallenge(fq_sponge.challenge());

    let zeta: G::ScalarField = zeta_chal.to_field(endo_r);
    // Second opening point: the evaluation point shifted by the domain
    // generator (the "next row" point).
    let zeta_omega = zeta * domain.d1.group_gen;

    ///////
    // Verify IPA
    //////

    // Snapshot the Fq-sponge state before the evaluations are absorbed:
    // the IPA verifier must restart from this exact state.
    let fq_sponge_before_evaluations = fq_sponge.clone();

    // Creating fr_sponge, absorbing eval to create challenges for IPA
    let mut fr_sponge = EFrSponge::new(G::sponge_params());
    fr_sponge.absorb(&fq_sponge.digest());

    // TODO avoid cloning
    evaluations
        .clone()
        .into_iter()
        .for_each(|x| fr_sponge.absorb(&x));
    // Create IPA challenges
    // poly scale
    let polyscale_chal = fr_sponge.challenge();
    let polyscale = polyscale_chal.to_field(endo_r);
    // eval scale
    let evalscale_chal = fr_sponge.challenge();
    let evalscale = evalscale_chal.to_field(endo_r);

    // Pack the evaluations and commitments
    // in the right format for IPA

    // Handling columns without quotient
    // Each entry pairs a column's commitment with its claimed evaluations
    // at [zeta, zeta_omega] (one chunk per column).
    let mut eval_for_ipa: Vec<_> = evaluations
        .clone()
        .zeta
        .cols
        .into_iter()
        .zip(evaluations.clone().zeta_omega.cols)
        .zip(commitments.clone().cols)
        .map(|((zeta, zeta_omega), cm)| Evaluation {
            commitment: PolyComm { chunks: vec![cm] },
            evaluations: vec![vec![zeta], vec![zeta_omega]],
        })
        .collect();
    // adding quotient
    // TODO avoid cloning
    // The quotient is committed in several chunks; all chunk evaluations
    // go into a single `Evaluation` entry.
    eval_for_ipa.push(Evaluation {
        commitment: PolyComm {
            chunks: commitments.clone().t_shares,
        },
        evaluations: vec![
            evaluations.clone().zeta.t_shares,
            evaluations.clone().zeta_omega.t_shares,
        ],
    });

    // Compute combined eval point
    // Fold all claimed evaluations into the single scalar that the IPA
    // opening proof is checked against.
    let combined_inner_product = {
        let es: Vec<_> = eval_for_ipa
            .iter()
            .map(|Evaluation { evaluations, .. }| evaluations.clone())
            .collect();

        combined_inner_product(&polyscale, &evalscale, es.as_slice())
    };
    let ipa_input = BatchEvaluationProof {
        sponge: fq_sponge_before_evaluations,
        evaluations: eval_for_ipa,
        evaluation_points: vec![zeta, zeta_omega],
        polyscale,
        evalscale,
        opening: ipa_proof,
        combined_inner_product,
    };
    let group_map = G::Map::setup();
    let ipa_is_correct = OpeningProof::verify(srs, &group_map, &mut [ipa_input], &mut thread_rng());

    ////////
    // Compute numerator zeta
    ///////

    let challenges = LookupChallenges {
        alpha,
        beta: beta_challenge,
        gamma: gamma_challenge,
    };
    // TODO : we should not care about the kimchi constants here
    // NOTE(review): `zk_rows` is set to 0 — presumably unused by the
    // lookup constraint expression; confirm against `ELookup`.
    let constants = Constants {
        endo_coefficient: *endo_r,
        mds: &G::sponge_params().mds,
        zk_rows: 0,
    };
    // Evaluate the constraint (compiled to Polish notation) at zeta,
    // using the proof's claimed column evaluations.
    let numerator_zeta = PolishToken::evaluate(
        constraint.to_polish().as_slice(),
        domain.d1,
        zeta,
        evaluations,
        &constants,
        &challenges,
    )
    .unwrap_or_else(|_| panic!("Could not evaluate quotient polynomial at zeta"));

    ////////
    // Check quotient correctness
    ///////

    // Recombine the chunked quotient evaluations:
    //   t(zeta) = sum_i t_i(zeta) * zeta^(i*n)
    // accumulating the running power of zeta^n alongside the sum.
    let zeta_pow_n = zeta.pow([domain.d1.size]);
    let (quotient_zeta, _) = evaluations.zeta.t_shares.iter().fold(
        (G::ScalarField::zero(), G::ScalarField::one()),
        |(res, zeta_i_n), chunk| {
            let res = res + zeta_i_n * chunk;
            let zeta_i_n = zeta_i_n * zeta_pow_n;
            (res, zeta_i_n)
        },
    );

    // The numerator must be divisible by the vanishing polynomial
    // zeta^n - 1; the division is safe with overwhelming probability
    // since zeta is a random challenge (hence not an n-th root of unity).
    let quotient_is_correct =
        quotient_zeta == numerator_zeta / (zeta_pow_n - G::ScalarField::one());
    quotient_is_correct && ipa_is_correct
}