arrabbiata/
witness.rs

use ark_ec::CurveConfig;
use ark_ff::PrimeField;
use ark_poly::Evaluations;
use kimchi::circuits::{domains::EvaluationDomains, gate::CurrOrNext};
use log::debug;
use mina_poseidon::constants::SpongeConstants;
use num_bigint::{BigInt, BigUint};
use num_integer::Integer;
use o1_utils::field_helpers::FieldHelpers;
use poly_commitment::{commitment::CommitmentCurve, ipa::SRS, PolyComm, SRS as _};
use rayon::iter::{IntoParallelRefIterator, ParallelIterator};

use crate::{
    challenge::{ChallengeTerm, Challenges},
    column::Column,
    curve::{ArrabbiataCurve, PlonkSpongeConstants},
    interpreter::{Instruction, InterpreterEnv, Side, VERIFIER_STARTING_INSTRUCTION},
    setup, NUMBER_OF_COLUMNS, NUMBER_OF_VALUES_TO_ABSORB_PUBLIC_IO,
};

/// A running program that the (folding) interpreter has access to.
pub struct Program<
    Fp: PrimeField,
    Fq: PrimeField,
    E: ArrabbiataCurve<ScalarField = Fp, BaseField = Fq>,
> where
    E::BaseField: PrimeField,
    <<E as CommitmentCurve>::Params as CurveConfig>::BaseField: PrimeField,
{
    /// Commitments to the accumulated program state.
    ///
    /// In Nova language, this is the commitment to the witness accumulator.
    pub accumulated_committed_state: Vec<PolyComm<E>>,

    /// Commitments to the previous program states.
    ///
    /// In Nova language, this is the commitment to the previous witness.
    pub previous_committed_state: Vec<PolyComm<E>>,

    /// Accumulated witness for the program state.
    ///
    /// In Nova language, this is the accumulated witness W.
    ///
    /// The size of the outer vector must be equal to the number of columns in
    /// the circuit.
    /// The size of the inner vector must be equal to the number of rows in
    /// the circuit.
    pub accumulated_program_state: Vec<Vec<E::ScalarField>>,

    /// List of the accumulated challenges over time.
    pub accumulated_challenges: Challenges<BigInt>,

    /// Challenges during the last computation.
    /// This field is useful to keep track of the challenges that must be
    /// verified in circuit.
    pub previous_challenges: Challenges<BigInt>,
}

impl<Fp: PrimeField, Fq: PrimeField, E: ArrabbiataCurve<ScalarField = Fp, BaseField = Fq>>
    Program<Fp, Fq, E>
where
    E::BaseField: PrimeField,
    <<E as CommitmentCurve>::Params as CurveConfig>::BaseField: PrimeField,
{
    pub fn new(srs_size: usize, blinder: E) -> Self {
        // Default set to the blinders. Using double to make the EC scaling happy.
        let previous_committed_state: Vec<PolyComm<E>> = (0..NUMBER_OF_COLUMNS)
            .map(|_| PolyComm::new(vec![(blinder + blinder).into()]))
            .collect();

        // FIXME: zero will not work.
        let accumulated_committed_state: Vec<PolyComm<E>> = (0..NUMBER_OF_COLUMNS)
            .map(|_| PolyComm::new(vec![blinder]))
            .collect();

        let mut accumulated_program_state: Vec<Vec<E::ScalarField>> =
            Vec::with_capacity(NUMBER_OF_COLUMNS);
        {
            let mut vec: Vec<E::ScalarField> = Vec::with_capacity(srs_size);
            (0..srs_size).for_each(|_| vec.push(E::ScalarField::zero()));
            (0..NUMBER_OF_COLUMNS).for_each(|_| accumulated_program_state.push(vec.clone()));
        };

        let accumulated_challenges: Challenges<BigInt> = Challenges::default();

        let previous_challenges: Challenges<BigInt> = Challenges::default();

        Self {
            accumulated_committed_state,
            previous_committed_state,
            accumulated_program_state,
            accumulated_challenges,
            previous_challenges,
        }
    }

    /// Commit to the program state and update the environment with the
    /// result.
    ///
    /// This method is supposed to be called after a new iteration of the
    /// program has been executed.
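    ///
    /// A rough sketch of the per-column data flow (illustrative, not an exact
    /// API description):
    ///
    /// ```text
    /// witness[i] : Vec<BigInt>                  // column i, one value per row
    ///   -> map into E::ScalarField              // reduction into the field
    ///   -> Evaluations over the domain d1
    ///   -> srs.commit_evaluations_non_hiding(d1, evals) -> PolyComm<E>
    /// ```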
    pub fn commit_state(
        &mut self,
        srs: &SRS<E>,
        domain: EvaluationDomains<E::ScalarField>,
        witness: &[Vec<BigInt>],
    ) {
        let comms: Vec<PolyComm<E>> = witness
            .par_iter()
            .map(|evals| {
                let evals: Vec<E::ScalarField> = evals
                    .par_iter()
                    .map(|x| E::ScalarField::from_biguint(&x.to_biguint().unwrap()).unwrap())
                    .collect();
                let evals = Evaluations::from_vec_and_domain(evals, domain.d1);
                srs.commit_evaluations_non_hiding(domain.d1, &evals)
            })
            .collect();
        self.previous_committed_state = comms
    }

    /// Absorb the last committed program state in the correct sponge.
    ///
    /// For a description of the messages to be given to the sponge, including
    /// the expected instantiation, refer to the section "Message Passing" in
    /// [crate::interpreter].
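    ///
    /// As a sketch, the absorption order implemented here is:
    ///
    /// ```text
    /// sponge <- absorb(digest)                  // digest of the previous state
    /// for comm in previous_committed_state:
    ///     sponge <- absorb(comm.chunks)         // commitments as curve points
    /// return sponge.state                       // exported as BigInt values
    /// ```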
    pub fn absorb_state(&mut self, digest: BigInt) -> Vec<BigInt> {
        let mut sponge = E::create_new_sponge();
        let previous_state: E::BaseField =
            E::BaseField::from_biguint(&digest.to_biguint().unwrap()).unwrap();
        E::absorb_fq(&mut sponge, previous_state);
        self.previous_committed_state
            .iter()
            .for_each(|comm| E::absorb_curve_points(&mut sponge, &comm.chunks));
        let state: Vec<BigInt> = sponge
            .sponge
            .state
            .iter()
            .map(|x| x.to_biguint().into())
            .collect();
        state
    }

    /// Simulate an interaction with the verifier by requesting to coin a
    /// challenge from the current prover sponge state.
    ///
    /// This method supposes that all the messages have been sent to the
    /// verifier previously, and the attribute [self.prover_sponge_state] has
    /// been updated accordingly by absorbing all the messages correctly.
    ///
    /// The side-effect of this method will be to run a permutation on the
    /// sponge state _after_ coining the challenge.
    /// There is a hypothesis on the sponge state that the inner permutation
    /// has been correctly executed if the absorption rate had been reached at
    /// the last absorption.
    ///
    /// The challenge will be added to the [self.challenges] attribute at the
    /// position given by the challenge `chal`.
    ///
    /// Internally, the method is implemented by simply loading the prover
    /// sponge state, and squeezing a challenge from it, relying on the
    /// implementation of the sponge. Usually, the challenge would be the first
    /// N bits of the first element, but it is left as an implementation detail
    /// of the sponge given by the curve.
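    ///
    /// Schematically (an informal sketch of the steps below):
    ///
    /// ```text
    /// sponge    <- load(sponge_state)
    /// challenge <- squeeze(sponge)          // e.g. the first 128 bits
    /// sponge    <- permutation(sponge)      // run after coining the challenge
    /// return (challenge, sponge.state)
    /// ```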
    pub fn coin_challenge(&self, sponge_state: Vec<BigInt>) -> (BigInt, Vec<BigInt>) {
        let mut sponge = E::create_new_sponge();
        sponge_state.iter().for_each(|x| {
            E::absorb_fq(
                &mut sponge,
                E::BaseField::from_biguint(&x.to_biguint().unwrap()).unwrap(),
            )
        });
        let verifier_answer = E::squeeze_challenge(&mut sponge).to_biguint().into();
        sponge.sponge.poseidon_block_cipher();
        let state: Vec<BigInt> = sponge
            .sponge
            .state
            .iter()
            .map(|x| x.to_biguint().into())
            .collect();
        (verifier_answer, state)
    }

    /// Accumulate the program state (or, in other words, the witness) by
    /// adding the last computed program state into the program state
    /// accumulator.
    ///
    /// This method is supposed to be called after the program state has been
    /// committed (by calling [self.commit_state]) and absorbed (by calling
    /// [self.absorb_state]). The "relation randomiser" must also have been
    /// coined and saved in the environment before, by calling
    /// [self.coin_challenge].
    ///
    /// The program state is accumulated into a different accumulator, depending
    /// on the curve currently being used.
    ///
    /// This is part of the work of the prover of the accumulation/folding
    /// scheme.
    ///
    /// This must translate the following equation:
    /// ```text
    /// acc_(p, n + 1) = acc_(p, n) + chal * w
    ///               OR
    /// acc_(q, n + 1) = acc_(q, n) + chal * w
    /// ```
    /// where acc and w are vectors of the same size.
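    ///
    /// As a small worked example with made-up values, taking `chal = 2` and a
    /// single column of three rows:
    ///
    /// ```text
    /// acc  = [1, 4, 9]
    /// w    = [5, 6, 7]
    /// acc' = [1 + 2*5, 4 + 2*6, 9 + 2*7] mod p = [11, 16, 23]
    /// ```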
    pub fn accumulate_program_state(&mut self, chal: BigInt, witness: &[Vec<BigInt>]) {
        let modulus: BigInt = E::ScalarField::modulus_biguint().into();
        self.accumulated_program_state = self
            .accumulated_program_state
            .iter()
            .zip(witness.iter()) // This iterates over the columns
            .map(|(evals_accumulator, evals_witness)| {
                evals_accumulator
                    .iter()
                    .zip(evals_witness.iter()) // This iterates over the rows
                    .map(|(acc, w)| {
                        let rhs: BigInt = (chal.clone() * w).mod_floor(&modulus);
                        let rhs: BigUint = rhs.to_biguint().unwrap();
                        let res = E::ScalarField::from_biguint(&rhs).unwrap();
                        *acc + res
                    })
                    .collect()
            })
            .collect();
    }

    /// Accumulate the committed state by adding the last committed state into
    /// the committed state accumulator.
    ///
    /// The commitments are accumulated into a different accumulator, depending
    /// on the curve currently being used.
    ///
    /// This is part of the work of the prover of the accumulation/folding
    /// scheme.
    ///
    /// This must translate the following equation:
    /// ```text
    /// C_(p, n + 1) = C_(p, n) + chal * comm
    ///               OR
    /// C_(q, n + 1) = C_(q, n) + chal * comm
    /// ```
    ///
    /// Note that the committed program state is encoded in
    /// [crate::NUMBER_OF_COLUMNS] values, therefore we must iterate over all
    /// the columns to accumulate the committed state.
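    ///
    /// In pseudo-code (illustrative sketch):
    ///
    /// ```text
    /// for i in 0..NUMBER_OF_COLUMNS:
    ///     C_acc[i] <- C_acc[i] + chal * C_prev[i]
    /// ```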
    pub fn accumulate_committed_state(&mut self, chal: BigInt) {
        let chal: BigUint = chal.to_biguint().unwrap();
        let chal: E::ScalarField = E::ScalarField::from_biguint(&chal).unwrap();
        self.accumulated_committed_state = self
            .accumulated_committed_state
            .iter()
            .zip(self.previous_committed_state.iter())
            .map(|(l, r)| l + &r.scale(chal))
            .collect();
    }
}

/// An environment is used to contain the state of a long "running program".
///
/// The running program is composed of two parts: one user application and one
/// verifier application. The verifier application is used to encode the
/// correctness of previous program state computations.
///
/// The term "app(lication) state" will be used to refer to the state of the
/// user application, and the term "IVC state" will be used to refer to the
/// state of the verifier application. The term "program state" will be used to
/// refer to the state of the whole program.
///
/// The environment contains all the accumulators that can be picked for a given
/// fold instance k, including the sponges.
///
/// The environment is run over big integers to avoid performing reductions at
/// every step. Instead, the user implementing the interpreter can reduce into
/// the corresponding field when they want.
///
/// The environment is generic over two curves (called E1 and E2) that are
/// supposed to form a cycle.
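///
/// A rough picture of the layout (informal sketch; the field names are the
/// ones defined below):
///
/// ```text
/// Env
/// ├── indexed_relation           setup: SRS, domains, initial sponge state
/// ├── program_e1: Program<E1>    accumulators attached to the first curve
/// ├── program_e2: Program<E2>    accumulators attached to the second curve
/// └── interpreter state          current row, sponges, challenges, witness
/// ```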
pub struct Env<
    Fp: PrimeField,
    Fq: PrimeField,
    E1: ArrabbiataCurve<ScalarField = Fp, BaseField = Fq>,
    E2: ArrabbiataCurve<ScalarField = Fq, BaseField = Fp>,
> where
    E1::BaseField: PrimeField,
    E2::BaseField: PrimeField,
    <<E1 as CommitmentCurve>::Params as CurveConfig>::BaseField: PrimeField,
    <<E2 as CommitmentCurve>::Params as CurveConfig>::BaseField: PrimeField,
{
    /// The indexed relation this witness environment is attached to.
    pub indexed_relation: setup::IndexedRelation<Fp, Fq, E1, E2>,

    /// Program state for curve E1
    pub program_e1: Program<Fp, Fq, E1>,

    /// Program state for curve E2
    pub program_e2: Program<Fq, Fp, E2>,

    // ----------------
    // Data only used by the interpreter while building the witness over time
    /// The index of the latest allocated variable in the circuit.
    /// It is used to allocate new variables without having to keep track of the
    /// position.
    pub idx_var: usize,

    pub idx_var_next_row: usize,

    /// The index of the latest allocated public input in the circuit.
    /// It is used to allocate new public inputs without having to keep track of
    /// the position.
    pub idx_var_pi: usize,

    /// The row currently being processed. Used to build the witness.
    pub current_row: usize,

    /// State of the current row in the execution trace
    pub state: [BigInt; NUMBER_OF_COLUMNS],

    /// Next row in the execution trace. It is useful when we deal with
    /// polynomials accessing "the next row", i.e. witness columns that we
    /// evaluate at both ζ and ζω.
    pub next_state: [BigInt; NUMBER_OF_COLUMNS],

    /// While folding, we must keep track of the challenges the verifier would
    /// have sent in the SNARK, and we must aggregate them.
    // FIXME: nothing is done yet, and the challenges haven't been decided yet.
    // See top-level documentation of the interpreter for more information.
    pub challenges: Challenges<BigInt>,

    /// The instruction currently being executed.
    /// This can be used to identify which gadget the interpreter is currently
    /// building.
    pub current_instruction: Instruction,

    /// The sponges will be used to simulate the verifier messages, and will
    /// also be used to verify the consistency of the computation by hashing the
    /// public IO.
    // IMPROVEME: use a list of BigInt? It might be faster as the CPU will
    // already have the values in its cache, and we can use a flat array
    pub sponge_e1: [BigInt; PlonkSpongeConstants::SPONGE_WIDTH],
    pub sponge_e2: [BigInt; PlonkSpongeConstants::SPONGE_WIDTH],

    /// Sponge state used by the prover for the current iteration.
    ///
    /// This sponge is used at the current iteration to absorb commitments of
    /// the program state and generate the challenges for the current iteration.
    /// The outputs of the sponge will be verified by the verifier of the next
    /// iteration.
    pub prover_sponge_state: [BigInt; PlonkSpongeConstants::SPONGE_WIDTH],

    /// Sponge state used by the verifier for the current iteration.
    ///
    /// This sponge is used at the current iteration to build the verifier
    /// circuit. The state will need to match the previous prover sponge
    /// state.
    pub verifier_sponge_state: [BigInt; PlonkSpongeConstants::SPONGE_WIDTH],

    /// The current iteration of the IVC.
    pub current_iteration: u64,

    /// The digest of the program state before executing the last iteration.
    /// The value will be used to initialize the execution trace of the verifier
    /// in the next iteration, in particular to verify that the challenges have
    /// been generated correctly.
    ///
    /// The value is a 128-bit value.
    pub last_program_digest_before_execution: BigInt,

    /// The digest of the program state after executing the last iteration.
    /// The value will be used to initialize the sponge before committing to the
    /// next iteration.
    ///
    /// The value is a 128-bit value.
    pub last_program_digest_after_execution: BigInt,

    /// The coined folding combiner that will be used to generate the
    /// combination of folding instances.
    pub r: BigInt,

    /// Temporary registers for elliptic curve points in affine coordinates that
    /// can be used to save values between instructions.
    ///
    /// These temporary registers can be loaded into the state by using the
    /// function `load_temporary_accumulators`.
    ///
    /// The registers can, and must, be cleaned after the gadget is computed.
    ///
    /// The values are considered as BigInt, even though we should add some
    /// types. As we want to apply the KISS method, we tend to avoid adding
    /// types. We leave this for future work.
    ///
    /// Two registers are provided, represented by a tuple for the coordinates
    /// (x, y).
    pub temporary_accumulators: ((BigInt, BigInt), (BigInt, BigInt)),

    /// Index of the values to absorb in the sponge
    pub idx_values_to_absorb: usize,
    // ----------------
    /// The witness of the current instance of the circuit.
    /// The size of the outer vector must be equal to the number of columns in
    /// the circuit.
    /// The size of the inner vector must be equal to the number of rows in
    /// the circuit.
    ///
    /// The columns/rows layout is used to avoid rebuilding the witness per
    /// column when committing to the witness.
    pub witness: Vec<Vec<BigInt>>,

    // --------------
    // Inputs
    /// Initial input
    pub z0: BigInt,

    /// Current input
    pub zi: BigInt,
    // ---------------
}

impl<
        Fp: PrimeField,
        Fq: PrimeField,
        E1: ArrabbiataCurve<ScalarField = Fp, BaseField = Fq>,
        E2: ArrabbiataCurve<ScalarField = Fq, BaseField = Fp>,
    > InterpreterEnv for Env<Fp, Fq, E1, E2>
where
    E1::BaseField: PrimeField,
    E2::BaseField: PrimeField,
    <<E1 as CommitmentCurve>::Params as CurveConfig>::BaseField: PrimeField,
    <<E2 as CommitmentCurve>::Params as CurveConfig>::BaseField: PrimeField,
{
    type Position = (Column, CurrOrNext);

    /// For efficiency, and for having a single interpreter, we do not use one
    /// of the fields. We use a generic BigInt to represent the values.
    /// When building the witness, we will reduce into the corresponding field.
    // FIXME: it might not be efficient as I initially thought. We do need to
    // make some transformations between biguint and bigint, with an extra cost
    // for allocations.
    type Variable = BigInt;

    fn allocate(&mut self) -> Self::Position {
        assert!(self.idx_var < NUMBER_OF_COLUMNS, "Maximum number of columns reached ({NUMBER_OF_COLUMNS}), increase the number of columns");
        let pos = Column::X(self.idx_var);
        self.idx_var += 1;
        (pos, CurrOrNext::Curr)
    }

    fn allocate_next_row(&mut self) -> Self::Position {
        assert!(self.idx_var_next_row < NUMBER_OF_COLUMNS, "Maximum number of columns reached ({NUMBER_OF_COLUMNS}), increase the number of columns");
        let pos = Column::X(self.idx_var_next_row);
        self.idx_var_next_row += 1;
        (pos, CurrOrNext::Next)
    }

    fn read_position(&self, pos: Self::Position) -> Self::Variable {
        let (col, row) = pos;
        let Column::X(idx) = col else {
            unimplemented!("Only works for private inputs")
        };
        match row {
            CurrOrNext::Curr => self.state[idx].clone(),
            CurrOrNext::Next => self.next_state[idx].clone(),
        }
    }

    fn write_column(&mut self, pos: Self::Position, v: Self::Variable) -> Self::Variable {
        let (col, row) = pos;
        let Column::X(idx) = col else {
            unimplemented!("Only works for private inputs")
        };
        let (modulus, srs_size): (BigInt, usize) = if self.current_iteration % 2 == 0 {
            (
                E1::ScalarField::modulus_biguint().into(),
                self.indexed_relation.get_srs_size(),
            )
        } else {
            (
                E2::ScalarField::modulus_biguint().into(),
                self.indexed_relation.get_srs_size(),
            )
        };
        let v = v.mod_floor(&modulus);
        match row {
            CurrOrNext::Curr => {
                self.state[idx].clone_from(&v);
            }
            CurrOrNext::Next => {
                assert!(self.current_row < srs_size - 1, "The witness builder is writing on the last row. It does not make sense to write on the next row after the last row");
                self.next_state[idx].clone_from(&v);
            }
        }
        v
    }

    fn constrain_boolean(&mut self, x: Self::Variable) {
        let modulus: BigInt = if self.current_iteration % 2 == 0 {
            E1::ScalarField::modulus_biguint().into()
        } else {
            E2::ScalarField::modulus_biguint().into()
        };
        let x = x.mod_floor(&modulus);
        assert!(x == BigInt::from(0_usize) || x == BigInt::from(1_usize));
    }

    fn constant(&self, v: BigInt) -> Self::Variable {
        v
    }

    fn add_constraint(&mut self, _x: Self::Variable) {
        unimplemented!("Only when building the constraints")
    }

    fn assert_zero(&mut self, var: Self::Variable) {
        assert_eq!(var, BigInt::from(0_usize));
    }

    fn assert_equal(&mut self, x: Self::Variable, y: Self::Variable) {
        assert_eq!(x, y);
    }

    fn square(&mut self, pos: Self::Position, x: Self::Variable) -> Self::Variable {
        let res = x.clone() * x.clone();
        self.write_column(pos, res.clone());
        res
    }

    /// Flagged as unsafe as it does require an additional range check
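    ///
    /// Worked example (illustrative): extracting the bits `[2, 6)` of
    /// `x = 0b1101_0110`:
    ///
    /// ```text
    /// mask = (1 << (6 - 2)) - 1 = 0b1111
    /// res  = (x >> 2) & mask    = 0b0101
    /// ```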
    unsafe fn bitmask_be(
        &mut self,
        x: &Self::Variable,
        highest_bit: u32,
        lowest_bit: u32,
        pos: Self::Position,
    ) -> Self::Variable {
        let diff: u32 = highest_bit - lowest_bit;
        if diff == 0 {
            self.write_column(pos, BigInt::from(0_usize))
        } else {
            assert!(
                diff > 0,
                "The difference between the highest and lowest bit should be greater than 0"
            );
            let rht = (BigInt::from(1_usize) << diff) - BigInt::from(1_usize);
            let lft = x >> lowest_bit;
            let res: BigInt = lft & rht;
            self.write_column(pos, res)
        }
    }

    // FIXME: for now, we use the row number and compute the square.
    // This is only for testing purposes, and having something to build the
    // witness.
    fn fetch_input(&mut self, pos: Self::Position) -> Self::Variable {
        let x = BigInt::from(self.current_row as u64);
        self.write_column(pos, x.clone());
        x
    }

    /// Reset the environment to build the next row
    fn reset(&mut self) {
        // Save the current state in the witness
        self.state.iter().enumerate().for_each(|(i, x)| {
            self.witness[i][self.current_row].clone_from(x);
        });
        // We increment the row
        // TODO: should we check that we are not going over the domain size?
        self.current_row += 1;
        // We reset the indices for the variables
        self.idx_var = 0;
        self.idx_var_next_row = 0;
        self.idx_var_pi = 0;
        // We keep track of the values we already set.
        self.state.clone_from(&self.next_state);
        // And we reset the next state
        self.next_state = core::array::from_fn(|_| BigInt::from(0_usize));
    }

    /// FIXME: check if we need to pick the left or right sponge
    fn coin_folding_combiner(&mut self, pos: Self::Position) -> Self::Variable {
        let r = if self.current_iteration % 2 == 0 {
            self.sponge_e1[0].clone()
        } else {
            self.sponge_e2[0].clone()
        };
        let (col, _) = pos;
        let Column::X(idx) = col else {
            unimplemented!("Only works for private columns")
        };
        self.state[idx].clone_from(&r);
        self.r.clone_from(&r);
        r
    }

    fn load_poseidon_state(&mut self, pos: Self::Position, i: usize) -> Self::Variable {
        let state = if self.current_iteration % 2 == 0 {
            self.sponge_e1[i].clone()
        } else {
            self.sponge_e2[i].clone()
        };
        self.write_column(pos, state)
    }

    fn get_poseidon_round_constant(&self, round: usize, i: usize) -> Self::Variable {
        if self.current_iteration % 2 == 0 {
            E1::sponge_params().round_constants[round][i]
                .to_biguint()
                .into()
        } else {
            E2::sponge_params().round_constants[round][i]
                .to_biguint()
                .into()
        }
    }

    fn get_poseidon_mds_matrix(&mut self, i: usize, j: usize) -> Self::Variable {
        if self.current_iteration % 2 == 0 {
            E1::sponge_params().mds[i][j].to_biguint().into()
        } else {
            E2::sponge_params().mds[i][j].to_biguint().into()
        }
    }

    unsafe fn save_poseidon_state(&mut self, x: Self::Variable, i: usize) {
        if self.current_iteration % 2 == 0 {
            let modulus: BigInt = E1::ScalarField::modulus_biguint().into();
            self.sponge_e1[i] = x.mod_floor(&modulus)
        } else {
            let modulus: BigInt = E2::ScalarField::modulus_biguint().into();
            self.sponge_e2[i] = x.mod_floor(&modulus)
        }
    }

    unsafe fn fetch_value_to_absorb(&mut self, pos: Self::Position) -> Self::Variable {
        let (col, curr_or_next) = pos;
        // Purely arbitrary for now
        let Column::X(_idx) = col else {
            panic!("Only private inputs can be accepted to load the values to be absorbed")
        };
        assert_eq!(
            curr_or_next,
            CurrOrNext::Curr,
            "Only the current row can be used to load the values to be absorbed"
        );
        // FIXME: for now, we only absorb the commitments to the columns
        let idx = self.idx_values_to_absorb;
        let res = if idx < 2 * NUMBER_OF_COLUMNS {
            let idx_col = idx / 2;
            debug!("Absorbing the accumulator for the column index {idx_col}. After this, there will still be {} elements to absorb", NUMBER_OF_VALUES_TO_ABSORB_PUBLIC_IO - idx - 1);
            if self.current_iteration % 2 == 0 {
                let (pt_x, pt_y) = self.program_e2.accumulated_committed_state[idx_col]
                    .get_first_chunk()
                    .to_coordinates()
                    .unwrap();
                if idx % 2 == 0 {
                    self.write_column(pos, pt_x.to_biguint().into())
                } else {
                    self.write_column(pos, pt_y.to_biguint().into())
                }
            } else {
                let (pt_x, pt_y) = self.program_e1.accumulated_committed_state[idx_col]
                    .get_first_chunk()
                    .to_coordinates()
                    .unwrap();
                if idx % 2 == 0 {
                    self.write_column(pos, pt_x.to_biguint().into())
                } else {
                    self.write_column(pos, pt_y.to_biguint().into())
                }
            }
        } else {
            unimplemented!("We only absorb the accumulators for now. Of course, this is not sound.")
        };
        self.idx_values_to_absorb += 1;
        res
    }

    unsafe fn load_temporary_accumulators(
        &mut self,
        pos_x: Self::Position,
        pos_y: Self::Position,
        side: Side,
    ) -> (Self::Variable, Self::Variable) {
        match self.current_instruction {
            Instruction::EllipticCurveScaling(i_comm, bit) => {
                // If we're processing the leftmost bit (i.e. bit == 0), we must load
                // the initial value into the accumulators from the environment.
                // In the left accumulator, we keep track of the value we keep doubling.
                // In the right accumulator, we keep the result.
                if bit == 0 {
                    if self.current_iteration % 2 == 0 {
                        match side {
                            Side::Left => {
                                let pt = self.program_e2.previous_committed_state[i_comm].get_first_chunk();
                                // We suppose we never have a commitment equal to the
                                // point at infinity
                                let (pt_x, pt_y) = pt.to_coordinates().unwrap();
                                let pt_x = self.write_column(pos_x, pt_x.to_biguint().into());
                                let pt_y = self.write_column(pos_y, pt_y.to_biguint().into());
                                (pt_x, pt_y)
                            }
                            // As it is the first iteration, we must use the point at infinity.
                            // However, to avoid handling the case equal to zero, we will
                            // use a blinder, that we will subtract at the end.
                            // As we suppose the probability to get a folding combiner
                            // equal to zero is negligible, we know we have a negligible
                            // probability to request to compute `0 * P`.
                            // FIXME: ! check this statement !
                            Side::Right => {
                                let pt = self.indexed_relation.srs_e2.h;
                                let (pt_x, pt_y) = pt.to_coordinates().unwrap();
                                let pt_x = self.write_column(pos_x, pt_x.to_biguint().into());
                                let pt_y = self.write_column(pos_y, pt_y.to_biguint().into());
                                (pt_x, pt_y)
                            }
                        }
                    } else {
                        match side {
                            Side::Left => {
                                let pt = self.program_e1.previous_committed_state[i_comm].get_first_chunk();
                                // We suppose we never have a commitment equal to the
                                // point at infinity
                                let (pt_x, pt_y) = pt.to_coordinates().unwrap();
                                let pt_x = self.write_column(pos_x, pt_x.to_biguint().into());
                                let pt_y = self.write_column(pos_y, pt_y.to_biguint().into());
                                (pt_x, pt_y)
                            }
                            // As it is the first iteration, we must use the point at infinity.
                            // However, to avoid handling the case equal to zero, we will
                            // use a blinder, that we will subtract at the end.
                            // As we suppose the probability to get a folding combiner
                            // equal to zero is negligible, we know we have a negligible
                            // probability to request to compute `0 * P`.
                            // FIXME: ! check this statement !
                            Side::Right => {
                                let blinder = self.indexed_relation.srs_e1.h;
                                let pt = blinder;
                                let (pt_x, pt_y) = pt.to_coordinates().unwrap();
                                let pt_x = self.write_column(pos_x, pt_x.to_biguint().into());
                                let pt_y = self.write_column(pos_y, pt_y.to_biguint().into());
                                (pt_x, pt_y)
                            }
                        }
                    }
                } else {
                    panic!("We should not load the temporary accumulators for bits different from 0 when using the elliptic curve scaling. It has been deactivated since we use the 'next row'");
                }
            }
            Instruction::EllipticCurveAddition(i_comm) => {
                // FIXME: we must get the scaled commitment, not simply the commitment
                let (pt_x, pt_y): (BigInt, BigInt) = match side {
                    Side::Left => {
                        if self.current_iteration % 2 == 0 {
                            let pt = self.program_e2.accumulated_committed_state[i_comm].get_first_chunk();
                            let (x, y) = pt.to_coordinates().unwrap();
                            (x.to_biguint().into(), y.to_biguint().into())
                        } else {
                            let pt = self.program_e1.accumulated_committed_state[i_comm].get_first_chunk();
                            let (x, y) = pt.to_coordinates().unwrap();
                            (x.to_biguint().into(), y.to_biguint().into())
                        }
                    }
                    Side::Right => {
                        if self.current_iteration % 2 == 0 {
                            let pt = self.program_e2.previous_committed_state[i_comm].get_first_chunk();
                            let (x, y) = pt.to_coordinates().unwrap();
                            (x.to_biguint().into(), y.to_biguint().into())
                        } else {
                            let pt = self.program_e1.previous_committed_state[i_comm].get_first_chunk();
                            let (x, y) = pt.to_coordinates().unwrap();
                            (x.to_biguint().into(), y.to_biguint().into())
                        }
                    }
                };
                let pt_x = self.write_column(pos_x, pt_x.clone());
                let pt_y = self.write_column(pos_y, pt_y.clone());
                (pt_x, pt_y)
            }
            _ => unimplemented!("For now, the accumulators can only be used by the elliptic curve scaling gadget and {:?} is not supported. This should be changed as soon as the gadget is implemented.", self.current_instruction),
        }
    }

    unsafe fn save_temporary_accumulators(
        &mut self,
        x: Self::Variable,
        y: Self::Variable,
        side: Side,
    ) {
        match side {
            Side::Left => {
                self.temporary_accumulators.0 = (x, y);
            }
            Side::Right => {
                self.temporary_accumulators.1 = (x, y);
            }
        }
    }

    // It is unsafe as no constraint is added
    unsafe fn is_same_ec_point(
        &mut self,
        pos: Self::Position,
        x1: Self::Variable,
        y1: Self::Variable,
        x2: Self::Variable,
        y2: Self::Variable,
    ) -> Self::Variable {
        let res = if x1 == x2 && y1 == y2 {
            BigInt::from(1_usize)
        } else {
            BigInt::from(0_usize)
        };
        self.write_column(pos, res)
    }

    fn zero(&self) -> Self::Variable {
        BigInt::from(0_usize)
    }

    fn one(&self) -> Self::Variable {
        BigInt::from(1_usize)
    }

    /// Inverse of a variable
    ///
    /// # Safety
    ///
    /// Zero is not allowed as an input.
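    ///
    /// The result is the multiplicative inverse of `x` in the scalar field of
    /// the curve used at the current iteration, written into `pos`. As a tiny
    /// worked example in a toy field:
    ///
    /// ```text
    /// p = 7, x = 3  =>  x^(-1) = 5, since 3 * 5 = 15 ≡ 1 (mod 7)
    /// ```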
    unsafe fn inverse(&mut self, pos: Self::Position, x: Self::Variable) -> Self::Variable {
        let res = if self.current_iteration % 2 == 0 {
            E1::ScalarField::from_biguint(&x.to_biguint().unwrap())
                .unwrap()
                .inverse()
                .unwrap()
                .to_biguint()
                .into()
        } else {
            E2::ScalarField::from_biguint(&x.to_biguint().unwrap())
                .unwrap()
                .inverse()
                .unwrap()
                .to_biguint()
                .into()
        };
        self.write_column(pos, res)
    }

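    /// Compute the slope λ used by the elliptic curve addition gadget and
    /// write it into `pos`. Summarising the two branches implemented below:
    ///
    /// ```text
    /// if (x1, y1) != (x2, y2):  λ = (y1 - y2) / (x1 - x2)
    /// else (doubling):          λ = (3 * x1^2 + a) / (2 * y1)
    /// ```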
    fn compute_lambda(
        &mut self,
        pos: Self::Position,
        is_same_point: Self::Variable,
        x1: Self::Variable,
        y1: Self::Variable,
        x2: Self::Variable,
        y2: Self::Variable,
    ) -> Self::Variable {
        let modulus: BigInt = if self.current_iteration % 2 == 0 {
            E1::ScalarField::modulus_biguint().into()
        } else {
            E2::ScalarField::modulus_biguint().into()
        };
        // If it is not the same point, we compute lambda as:
        // - λ = (Y1 - Y2) / (X1 - X2)
        let (num, denom): (BigInt, BigInt) = if is_same_point == BigInt::from(0_usize) {
            let num: BigInt = y1.clone() - y2.clone();
            let x1_minus_x2: BigInt = (x1.clone() - x2.clone()).mod_floor(&modulus);
            // We temporarily store the inverse of the denominator into the
            // given position.
            let denom = unsafe { self.inverse(pos, x1_minus_x2) };
            (num, denom)
        } else {
            // Otherwise, we compute λ as:
            // - λ = (3X1^2 + a) / (2Y1)
            let denom = {
                let double_y1 = y1.clone() + y1.clone();
                // We temporarily store the inverse of the denominator into the
                // given position.
                unsafe { self.inverse(pos, double_y1) }
            };
            let num = {
                let a: BigInt = if self.current_iteration % 2 == 0 {
                    let a: E2::BaseField = E2::get_curve_params().0;
                    a.to_biguint().into()
                } else {
                    let a: E1::BaseField = E1::get_curve_params().0;
                    a.to_biguint().into()
                };
                let x1_square = x1.clone() * x1.clone();
                let two_x1_square = x1_square.clone() + x1_square.clone();
                two_x1_square + x1_square + a
            };
            (num, denom)
        };
        let res = (num * denom).mod_floor(&modulus);
        self.write_column(pos, res)
    }

    /// Double the elliptic curve point given by the affine coordinates
    /// `(x1, y1)` and save the result in the registers `pos_x` and `pos_y`.
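    ///
    /// The affine doubling formulas implemented below are:
    ///
    /// ```text
    /// λ  = (3 * x1^2 + a) / (2 * y1)
    /// x3 = λ^2 - 2 * x1
    /// y3 = λ * (x1 - x3) - y1
    /// ```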
    fn double_ec_point(
        &mut self,
        pos_x: Self::Position,
        pos_y: Self::Position,
        x1: Self::Variable,
        y1: Self::Variable,
    ) -> (Self::Variable, Self::Variable) {
        let modulus: BigInt = if self.current_iteration % 2 == 0 {
            E1::ScalarField::modulus_biguint().into()
        } else {
            E2::ScalarField::modulus_biguint().into()
        };
        // - λ = (3X1^2 + a) / (2Y1)
        // We compute λ and use an additional column as a temporary value
        // otherwise, we get a constraint of degree higher than 5
        let lambda_pos = self.allocate();
        let denom = {
            let double_y1 = y1.clone() + y1.clone();
            // We temporarily store the inverse of the denominator into the
            // given position.
            unsafe { self.inverse(lambda_pos, double_y1) }
        };
        let num = {
            let a: BigInt = if self.current_iteration % 2 == 0 {
                let a: E2::BaseField = E2::get_curve_params().0;
                a.to_biguint().into()
            } else {
                let a: E1::BaseField = E1::get_curve_params().0;
                a.to_biguint().into()
            };
            let x1_square = x1.clone() * x1.clone();
            let two_x1_square = x1_square.clone() + x1_square.clone();
            two_x1_square + x1_square + a
        };
        let lambda = (num * denom).mod_floor(&modulus);
        self.write_column(lambda_pos, lambda.clone());
        // - X3 = λ^2 - X1 - X2
        let x3 = {
            let double_x1 = x1.clone() + x1.clone();
            let res = lambda.clone() * lambda.clone() - double_x1.clone();
            self.write_column(pos_x, res.clone())
        };
        // - Y3 = λ(X1 - X3) - Y1
        let y3 = {
            let x1_minus_x3 = x1.clone() - x3.clone();
            let res = lambda.clone() * x1_minus_x3 - y1.clone();
            self.write_column(pos_y, res.clone())
        };
        (x3, y3)
    }
}

impl<
        Fp: PrimeField,
        Fq: PrimeField,
        E1: ArrabbiataCurve<ScalarField = Fp, BaseField = Fq>,
        E2: ArrabbiataCurve<ScalarField = Fq, BaseField = Fp>,
    > Env<Fp, Fq, E1, E2>
where
    E1::BaseField: PrimeField,
    E2::BaseField: PrimeField,
    <<E1 as CommitmentCurve>::Params as CurveConfig>::BaseField: PrimeField,
    <<E2 as CommitmentCurve>::Params as CurveConfig>::BaseField: PrimeField,
{
    pub fn new(z0: BigInt, indexed_relation: setup::IndexedRelation<Fp, Fq, E1, E2>) -> Self {
        let srs_size = indexed_relation.get_srs_size();
        let (blinder_e1, blinder_e2) = indexed_relation.get_srs_blinders();

        let mut witness: Vec<Vec<BigInt>> = Vec::with_capacity(NUMBER_OF_COLUMNS);
        {
            let mut vec: Vec<BigInt> = Vec::with_capacity(srs_size);
            (0..srs_size).for_each(|_| vec.push(BigInt::from(0_usize)));
            (0..NUMBER_OF_COLUMNS).for_each(|_| witness.push(vec.clone()));
        };

        // Initialize Program instances for both curves
        let program_e1 = Program::new(srs_size, blinder_e1);
        let program_e2 = Program::new(srs_size, blinder_e2);

        // FIXME: challenges
        let challenges: Challenges<BigInt> = Challenges::default();

        // FIXME: use setup
        let prover_sponge_state: [BigInt; PlonkSpongeConstants::SPONGE_WIDTH] =
            core::array::from_fn(|_| BigInt::from(0_u64));
        let verifier_sponge_state: [BigInt; PlonkSpongeConstants::SPONGE_WIDTH] =
            core::array::from_fn(|_| BigInt::from(0_u64));

        // FIXME: set this up correctly. Temporary as we're moving the initial
        // transcript state into the setup
        let sponge_e1 = indexed_relation.initial_sponge.clone();
        let sponge_e2 = indexed_relation.initial_sponge.clone();

        Self {
            // -------
            // Setup
            indexed_relation,
            // -------
            // Program state for each curve
            program_e1,
            program_e2,
            // ------
            // ------
            idx_var: 0,
            idx_var_next_row: 0,
            idx_var_pi: 0,
            current_row: 0,
            state: core::array::from_fn(|_| BigInt::from(0_usize)),
            next_state: core::array::from_fn(|_| BigInt::from(0_usize)),

            challenges,

            current_instruction: VERIFIER_STARTING_INSTRUCTION,
            sponge_e1,
            sponge_e2,
            prover_sponge_state,
            verifier_sponge_state,
            current_iteration: 0,
            // FIXME: set a correct value
            last_program_digest_before_execution: BigInt::from(0_u64),
            // FIXME: set a correct value
            last_program_digest_after_execution: BigInt::from(0_u64),
            r: BigInt::from(0_usize),
            // Initialize the temporary accumulators with 0
            temporary_accumulators: (
                (BigInt::from(0_u64), BigInt::from(0_u64)),
                (BigInt::from(0_u64), BigInt::from(0_u64)),
            ),
            idx_values_to_absorb: 0,
            // ------
            // ------
            // Used by the interpreter
            // Used to allocate variables
            // Witness builder related
            witness,
            // ------
            // Inputs
            z0: z0.clone(),
            zi: z0,
        }
    }

    /// Reset the environment to build the next iteration
    pub fn reset_for_next_iteration(&mut self) {
        // Reset the state for the next iteration
        self.current_row = 0;
        self.state = core::array::from_fn(|_| BigInt::from(0_usize));
        self.idx_var = 0;
        self.current_instruction = VERIFIER_STARTING_INSTRUCTION;
        self.idx_values_to_absorb = 0;
    }

    /// The blinder used to commit, to avoid committing to the zero polynomial.
    ///
    /// It is part of the instance, and it is accumulated in the IVC.
    pub fn accumulate_commitment_blinder(&mut self) {
        // TODO
    }

    /// Commit to the program state and update the environment with the
    /// result.
    ///
    /// This method is supposed to be called after a new iteration of the
    /// program has been executed.
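    ///
    /// Together with the other prover-side methods, the expected call order
    /// within one iteration is roughly (illustrative sketch):
    ///
    /// ```text
    /// env.commit_state();                                  // commit to the witness columns
    /// env.absorb_state();                                  // absorb the commitments
    /// env.coin_challenge(ChallengeTerm::RelationCombiner); // coin the relation randomiser
    /// env.accumulate_program_state();                      // fold the witness
    /// env.accumulate_committed_state();                    // fold the commitments
    /// env.reset_for_next_iteration();
    /// ```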
    pub fn commit_state(&mut self) {
        if self.current_iteration % 2 == 0 {
            assert_eq!(
                self.current_row as u64,
                self.indexed_relation.domain_fp.d1.size,
                "The program has not been fully executed. Missing {} rows",
                self.indexed_relation.domain_fp.d1.size - self.current_row as u64,
            );
            // Use program_e1's commit_state method
            self.program_e1.commit_state(
                &self.indexed_relation.srs_e1,
                self.indexed_relation.domain_fp,
                &self.witness,
            )
        } else {
            assert_eq!(
                self.current_row as u64,
                self.indexed_relation.domain_fq.d1.size,
                "The program has not been fully executed. Missing {} rows",
                self.indexed_relation.domain_fq.d1.size - self.current_row as u64,
            );
            // Use program_e2's commit_state method
            self.program_e2.commit_state(
                &self.indexed_relation.srs_e2,
                self.indexed_relation.domain_fq,
                &self.witness,
            )
        }
    }

    /// Absorb the last committed program state in the correct sponge.
    ///
    /// For a description of the messages to be given to the sponge, including
    /// the expected instantiation, refer to the section "Message Passing" in
    /// [crate::interpreter].
    pub fn absorb_state(&mut self) {
        let state = if self.current_iteration % 2 == 0 {
            // Use program_e1's absorb_state method
            let state = self
                .program_e1
                .absorb_state(self.last_program_digest_after_execution.clone());
            state.try_into().unwrap()
        } else {
            // Use program_e2's absorb_state method
            let state = self
                .program_e2
                .absorb_state(self.last_program_digest_after_execution.clone());
            state.try_into().unwrap()
        };

        self.prover_sponge_state = state;
    }

    /// Compute the output of the application on the previous output
    // TODO: we should compute the hash of the previous commitments, only on
    // CPU?
    pub fn compute_output(&mut self) {
        self.zi = BigInt::from(42_usize)
    }

    pub fn fetch_instruction(&self) -> Instruction {
        self.current_instruction
    }

    /// Simulate an interaction with the verifier by requesting to coin a
    /// challenge from the current prover sponge state.
    ///
    /// This method supposes that all the messages have been sent to the
    /// verifier previously, and the attribute [self.prover_sponge_state] has
    /// been updated accordingly by absorbing all the messages correctly.
    ///
    /// The side-effect of this method will be to run a permutation on the
    /// sponge state _after_ coining the challenge.
    /// There is a hypothesis on the sponge state that the inner permutation
    /// has been correctly executed if the absorption rate had been reached at
    /// the last absorption.
    ///
    /// The challenge will be added to the [self.challenges] attribute at the
    /// position given by the challenge `chal`.
    ///
    /// Internally, the method is implemented by simply loading the prover
    /// sponge state, and squeezing a challenge from it, relying on the
    /// implementation of the sponge. Usually, the challenge would be the first
    /// N bits of the first element, but it is left as an implementation detail
    /// of the sponge given by the curve.
    pub fn coin_challenge(&mut self, chal: ChallengeTerm) {
        let sponge_state_vec: Vec<BigInt> = self.prover_sponge_state.to_vec();

        let (verifier_answer, new_state) = if self.current_iteration % 2 == 0 {
            self.program_e1.coin_challenge(sponge_state_vec)
        } else {
            self.program_e2.coin_challenge(sponge_state_vec)
        };

        self.challenges[chal] = verifier_answer;
        self.prover_sponge_state = new_state.try_into().unwrap();
    }

    /// Accumulate the program state (or, in other words, the witness) by
    /// adding the last computed program state into the program state
    /// accumulator.
    ///
    /// This method is supposed to be called after the program state has been
    /// committed (by calling [self.commit_state]) and absorbed (by calling
    /// [self.absorb_state]). The "relation randomiser" must also have been
    /// coined and saved in the environment before, by calling
    /// [self.coin_challenge].
    ///
    /// The program state is accumulated into a different accumulator, depending
    /// on the curve currently being used.
    ///
    /// This is part of the work of the prover of the accumulation/folding
    /// scheme.
    ///
    /// This must translate the following equation:
    /// ```text
    /// acc_(p, n + 1) = acc_(p, n) + chal * w
    ///               OR
    /// acc_(q, n + 1) = acc_(q, n) + chal * w
    /// ```
    /// where acc and w are vectors of the same size.
    pub fn accumulate_program_state(&mut self) {
        let chal = self.challenges[ChallengeTerm::RelationCombiner].clone();

        if self.current_iteration % 2 == 0 {
            self.program_e1
                .accumulate_program_state(chal, &self.witness);
        } else {
            self.program_e2
                .accumulate_program_state(chal, &self.witness);
        }
    }

    /// Accumulate the committed state by adding the last committed state into
    /// the committed state accumulator.
    ///
    /// The commitments are accumulated into a different accumulator, depending
    /// on the curve currently being used.
    ///
    /// This is part of the work of the prover of the accumulation/folding
    /// scheme.
    ///
    /// This must translate the following equation:
    /// ```text
    /// C_(p, n + 1) = C_(p, n) + chal * comm
    ///               OR
    /// C_(q, n + 1) = C_(q, n) + chal * comm
    /// ```
    ///
    /// Note that the committed program state is encoded in
    /// [crate::NUMBER_OF_COLUMNS] values, therefore we must iterate over all
    /// the columns to accumulate the committed state.
    pub fn accumulate_committed_state(&mut self) {
        let chal = self.challenges[ChallengeTerm::RelationCombiner].clone();

        if self.current_iteration % 2 == 0 {
            self.program_e2.accumulate_committed_state(chal);
        } else {
            self.program_e1.accumulate_committed_state(chal);
        }
    }
}