-
Notifications
You must be signed in to change notification settings - Fork 656
[DO NOT MERGE INTO MAIN] feat: refactoring basefold API following "virtual oracle" philosophy #2749
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: main
Are you sure you want to change the base?
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -96,52 +96,21 @@ impl<GC: IopCtx<F: TwoAdicField, EF: TwoAdicField>, P: ComputeTcsOpenings<GC, Cp | |
| Ok((commitment, BasefoldProverData { encoded_messages, tcs_prover_data })) | ||
| } | ||
|
|
||
| #[inline] | ||
| pub fn prove_trusted_mle_evaluations( | ||
| #[allow(clippy::type_complexity)] | ||
| pub fn prove_from_prebatched_inputs( | ||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. maybe |
||
| &self, | ||
| mut eval_point: Point<GC::EF>, | ||
| mle_rounds: Rounds<Message<Mle<GC::F>>>, | ||
| evaluation_claims: Rounds<Evaluations<GC::EF>>, | ||
| prover_data: Rounds<BasefoldProverData<GC::F, P::ProverData>>, | ||
| batched_mle: Mle<GC::EF, CpuBackend>, | ||
| batched_eval_claim: GC::EF, | ||
| batched_codeword: RsCodeWord<GC::F, CpuBackend>, | ||
| prover_datas: Rounds<BasefoldProverData<GC::F, P::ProverData>>, | ||
| challenger: &mut GC::Challenger, | ||
| ) -> Result<BasefoldProof<GC>, BaseFoldConfigProverError<GC, P>> { | ||
| let fri_prover = FriCpuProver::<GC, P>(PhantomData); | ||
| // Get all the mles from all rounds in order. | ||
| let mles = mle_rounds | ||
| .iter() | ||
| .flat_map(|round| round.clone().into_iter()) | ||
| .collect::<Message<Mle<_, _>>>(); | ||
|
|
||
| let encoded_messages = prover_data | ||
| .iter() | ||
| .flat_map(|data| data.encoded_messages.iter().cloned()) | ||
| .collect::<Message<RsCodeWord<_, _>>>(); | ||
|
|
||
| let evaluation_claims = evaluation_claims.into_iter().flatten().collect::<Vec<_>>(); | ||
|
|
||
| // Grind for batch randomness. | ||
| let batch_grinding_witness = challenger.grind(BATCH_GRINDING_BITS); | ||
|
|
||
| // Sample batching coefficients via partial Lagrange basis. | ||
| let total_len = mles.iter().map(|mle| mle.num_polynomials()).sum::<usize>(); | ||
| let num_batching_variables = total_len.next_power_of_two().ilog2(); | ||
| let batching_point = challenger.sample_point::<GC::EF>(num_batching_variables); | ||
|
|
||
| let batching_coefficients = partial_lagrange_blocking(&batching_point); | ||
| let mut current_mle = batched_mle; | ||
| let mut current_codeword = batched_codeword; | ||
|
|
||
| // Batch the mles and codewords. | ||
| let (mle_batch, codeword_batch, batched_eval_claim) = fri_prover.batch( | ||
| &batching_coefficients, | ||
| mles, | ||
| encoded_messages, | ||
| evaluation_claims, | ||
| &self.encoder, | ||
| ); | ||
| // From this point on, run the BaseFold protocol on the random linear combination codeword, | ||
| // the random linear combination multilinear, and the random linear combination of the | ||
| // evaluation claims. | ||
| let mut current_mle = mle_batch; | ||
| let mut current_codeword = codeword_batch; | ||
| // Initialize the vecs that go into a BaseFoldProof. | ||
| let log_len = current_mle.num_variables(); | ||
| let mut univariate_messages: Vec<[GC::EF; 2]> = vec![]; | ||
|
|
@@ -155,10 +124,12 @@ impl<GC: IopCtx<F: TwoAdicField, EF: TwoAdicField>, P: ComputeTcsOpenings<GC, Cp | |
| eval_point.dimension() as u32, | ||
| "eval point dimension mismatch" | ||
| ); | ||
|
|
||
| // Observe the number of FRI rounds. In principle, the prover is bound to this number already | ||
| // because it is determined by the heights of the codewords and the log_blowup, but we | ||
| // observe it here for extra security. | ||
| challenger.observe(GC::F::from_canonical_usize(eval_point.dimension())); | ||
| // Main Basefold reduction loop | ||
| for _ in 0..eval_point.dimension() { | ||
| // Compute claims for `g(X_0, X_1, ..., X_{d-1}, 0)` and `g(X_0, X_1, ..., X_{d-1}, 1)`. | ||
| let last_coord = eval_point.remove_last_coordinate(); | ||
|
|
@@ -172,33 +143,37 @@ impl<GC: IopCtx<F: TwoAdicField, EF: TwoAdicField>, P: ComputeTcsOpenings<GC, Cp | |
|
|
||
| // Perform a single round of the FRI commit phase, returning the commitment, folded | ||
| // codeword, and folding parameter. | ||
| let (beta, folded_mle, folded_codeword, commitment, leaves, prover_data) = fri_prover | ||
| .commit_phase_round(current_mle, current_codeword, &self.tcs_prover, challenger) | ||
| .map_err(BasefoldProverError::CommitPhaseError)?; | ||
| let (beta, folded_mle, folded_codeword, commitment, leaves, prover_data_round) = | ||
| fri_prover | ||
| .commit_phase_round(current_mle, current_codeword, &self.tcs_prover, challenger) | ||
| .map_err(BasefoldProverError::CommitPhaseError)?; | ||
|
|
||
| fri_commitments.push(commitment); | ||
| commit_phase_data.push(prover_data); | ||
| commit_phase_data.push(prover_data_round); | ||
| commit_phase_values.push(leaves); | ||
|
|
||
| current_mle = folded_mle; | ||
| current_codeword = folded_codeword; | ||
| current_batched_eval_claim = zero_val + beta * one_val; | ||
| } | ||
|
|
||
| // Finalize the constant polynomial | ||
| let final_poly = fri_prover.final_poly(current_codeword); | ||
| challenger.observe_ext_element(final_poly); | ||
|
|
||
| // Proof of work | ||
| let fri_config = self.encoder.config(); | ||
| let pow_bits = fri_config.proof_of_work_bits; | ||
| let pow_witness = challenger.grind(pow_bits); | ||
|
|
||
| // FRI Query Phase. | ||
| let query_indices: Vec<usize> = (0..fri_config.num_queries) | ||
| .map(|_| challenger.sample_bits(log_len as usize + fri_config.log_blowup())) | ||
| .collect(); | ||
|
|
||
| // Open the original polynomials at the query indices. | ||
| // Open each committed polynomial at the query indices. | ||
| let mut component_polynomials_query_openings_and_proofs = vec![]; | ||
| for prover_data in prover_data { | ||
| for prover_data in prover_datas { | ||
| let BasefoldProverData { encoded_messages, tcs_prover_data } = prover_data; | ||
| let values = | ||
| self.tcs_prover.compute_openings_at_indices(encoded_messages, &query_indices); | ||
|
|
@@ -207,8 +182,8 @@ impl<GC: IopCtx<F: TwoAdicField, EF: TwoAdicField>, P: ComputeTcsOpenings<GC, Cp | |
| .prove_openings_at_indices(tcs_prover_data, &query_indices) | ||
| .map_err(BaseFoldConfigProverError::<GC, P>::TcsCommitError) | ||
| .unwrap(); | ||
| let opening = MerkleTreeOpeningAndProof::<GC> { values, proof }; | ||
| component_polynomials_query_openings_and_proofs.push(opening); | ||
| component_polynomials_query_openings_and_proofs | ||
| .push(MerkleTreeOpeningAndProof::<GC> { values, proof }); | ||
| } | ||
|
|
||
| // Provide openings for the FRI query phase. | ||
|
|
@@ -218,7 +193,7 @@ impl<GC: IopCtx<F: TwoAdicField, EF: TwoAdicField>, P: ComputeTcsOpenings<GC, Cp | |
| for index in indices.iter_mut() { | ||
| *index >>= 1; | ||
| } | ||
| let leaves: Message<Tensor<GC::F>> = leaves.into(); | ||
| let leaves: Message<Tensor<GC::F, CpuBackend>> = leaves.into(); | ||
| let values = self.tcs_prover.compute_openings_at_indices(leaves, &indices); | ||
|
|
||
| let proof = self | ||
|
|
@@ -236,10 +211,68 @@ impl<GC: IopCtx<F: TwoAdicField, EF: TwoAdicField>, P: ComputeTcsOpenings<GC, Cp | |
| query_phase_openings_and_proofs, | ||
| final_poly, | ||
| pow_witness, | ||
| batch_grinding_witness, | ||
| batch_grinding_witness: Default::default(), | ||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. nit: I think this is somewhat risky (someone might forget about it and try to verify this proof). Maybe the return-type of this function should be an |
||
| }) | ||
| } | ||
|
|
||
| #[inline] | ||
| pub fn prove_trusted_mle_evaluations( | ||
| &self, | ||
| eval_point: Point<GC::EF>, | ||
| mle_rounds: Rounds<Message<Mle<GC::F>>>, | ||
| evaluation_claims: Rounds<Evaluations<GC::EF>>, | ||
| prover_data: Rounds<BasefoldProverData<GC::F, P::ProverData>>, | ||
| challenger: &mut GC::Challenger, | ||
| ) -> Result<BasefoldProof<GC>, BaseFoldConfigProverError<GC, P>> { | ||
| let fri_prover = FriCpuProver::<GC, P>(PhantomData); | ||
| // Get all the mles from all rounds in order. | ||
| let mles = mle_rounds | ||
| .iter() | ||
| .flat_map(|round| round.clone().into_iter()) | ||
| .collect::<Message<Mle<_, _>>>(); | ||
|
|
||
| let encoded_messages = prover_data | ||
| .iter() | ||
| .flat_map(|data| data.encoded_messages.iter().cloned()) | ||
| .collect::<Message<RsCodeWord<_, _>>>(); | ||
|
|
||
| let evaluation_claims = evaluation_claims.into_iter().flatten().collect::<Vec<_>>(); | ||
|
|
||
| // Grind for batch randomness. | ||
| let batch_grinding_witness = challenger.grind(BATCH_GRINDING_BITS); | ||
|
|
||
| // Sample batching coefficients via partial Lagrange basis. | ||
| let total_len = mles.iter().map(|mle| mle.num_polynomials()).sum::<usize>(); | ||
| let num_batching_variables = total_len.next_power_of_two().ilog2(); | ||
| let batching_point = challenger.sample_point::<GC::EF>(num_batching_variables); | ||
|
|
||
| let batching_coefficients = partial_lagrange_blocking(&batching_point); | ||
|
|
||
| // Batch the mles and codewords. | ||
| let (mle_batch, codeword_batch, batched_eval_claim) = fri_prover.batch( | ||
| &batching_coefficients, | ||
| mles, | ||
| encoded_messages, | ||
| evaluation_claims, | ||
| &self.encoder, | ||
| ); | ||
|
|
||
| // Run the BaseFold protocol on the random linear combination codeword, | ||
| // the random linear combination multilinear, and the random linear combination of the | ||
| // evaluation claims. | ||
| let mut proof = self.prove_from_prebatched_inputs( | ||
| eval_point, | ||
| mle_batch, | ||
| batched_eval_claim, | ||
| codeword_batch, | ||
| prover_data, | ||
| challenger, | ||
| )?; | ||
| // Add in the true grinding witness. | ||
| proof.batch_grinding_witness = batch_grinding_witness; | ||
| Ok(proof) | ||
| } | ||
|
|
||
| pub fn prove_untrusted_evaluations( | ||
| &self, | ||
| eval_point: Point<GC::EF>, | ||
|
|
||
There was a problem hiding this comment.
Choose a reason for hiding this comment.
The reason will be displayed to describe this comment to others. Learn more.
nit: maybe not necessary