Skip to content

Commit 0d033e1

Browse files
leo-starkware and alon-f committed
Adapt the queries for the preprocessed trees. (#1281)
Co-authored-by: Alon F <alonf@starkware.co>
1 parent 83644d5 commit 0d033e1

File tree

7 files changed

+94
-13
lines changed

7 files changed

+94
-13
lines changed

crates/examples/src/poseidon/mod.rs

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -487,6 +487,7 @@ mod tests {
487487
);
488488
}
489489

490+
#[ignore = "AIRs with constraint degree >= 2 are not supported yet in the lifted protocol."]
490491
#[test_log::test]
491492
fn test_simd_poseidon_prove() {
492493
// Note: To see time measurement, run test with
@@ -530,6 +531,7 @@ mod tests {
530531
verify(&[&component], channel, commitment_scheme, proof).unwrap();
531532
}
532533

534+
#[ignore = "AIRs with constraint degree >= 2 are not supported yet in the lifted protocol."]
533535
#[cfg(feature = "tracing")]
534536
#[test]
535537
fn trace_simd_poseidon_prove() {

crates/stwo/src/core/pcs/mod.rs

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,7 @@
88
//! Note: Opened points cannot come from the commitment domain.
99
1010
pub mod quotients;
11-
mod utils;
11+
pub mod utils;
1212
mod verifier;
1313

1414
use serde::{Deserialize, Serialize};

crates/stwo/src/core/pcs/utils.rs

Lines changed: 20 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -174,3 +174,23 @@ impl<T> TreeVec<ColumnVec<Vec<T>>> {
174174
self.0.into_iter().flatten().flatten().collect()
175175
}
176176
}
177+
178+
pub fn prepare_preprocessed_query_positions(
179+
query_positions: &[usize],
180+
max_log_size: u32,
181+
pp_max_log_size: u32,
182+
) -> Vec<usize> {
183+
if pp_max_log_size == 0 {
184+
return vec![];
185+
};
186+
if max_log_size < pp_max_log_size {
187+
return query_positions
188+
.iter()
189+
.map(|pos| (pos >> 1 << (pp_max_log_size - max_log_size + 1)) + (pos & 1))
190+
.collect();
191+
}
192+
query_positions
193+
.iter()
194+
.map(|pos| (pos >> (max_log_size - pp_max_log_size + 1) << 1) + (pos & 1))
195+
.collect()
196+
}

crates/stwo/src/core/pcs/verifier.rs

Lines changed: 46 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -11,6 +11,7 @@ use super::utils::TreeVec;
1111
use super::PcsConfig;
1212
use crate::core::channel::{Channel, MerkleChannel};
1313
use crate::core::pcs::quotients::CommitmentSchemeProof;
14+
use crate::core::pcs::utils::prepare_preprocessed_query_positions;
1415
use crate::core::vcs_lifted::merkle_hasher::MerkleHasherLifted;
1516
use crate::core::vcs_lifted::verifier::MerkleVerifierLifted;
1617
use crate::core::verifier::VerificationError;
@@ -62,31 +63,69 @@ impl<MC: MerkleChannel> CommitmentSchemeVerifier<MC> {
6263
) -> Result<(), VerificationError> {
6364
channel.mix_felts(&proof.sampled_values.clone().flatten_cols());
6465
let random_coeff = channel.draw_secure_felt();
65-
let max_log_size = *self.column_log_sizes().iter().flatten().max().unwrap();
66+
// The lifting log size is the length of the longest column which has at least one sample
67+
// (i.e. a column which is actually used in the constraints). Usually, the only columns
68+
// that have an empty vector of samples are among the preprocessed columns.
69+
let lifting_log_size = self
70+
.column_log_sizes()
71+
.zip_cols(&sampled_points)
72+
.flatten()
73+
.iter()
74+
.filter(|(_, sampled_points)| !sampled_points.is_empty())
75+
.map(|(log_size, _)| *log_size)
76+
.max()
77+
.unwrap();
6678

6779
let bound =
68-
CirclePolyDegreeBound::new(max_log_size - self.config.fri_config.log_blowup_factor);
80+
CirclePolyDegreeBound::new(lifting_log_size - self.config.fri_config.log_blowup_factor);
6981

7082
// FRI commitment phase on OODS quotients.
7183
let mut fri_verifier =
7284
FriVerifier::<MC>::commit(channel, self.config.fri_config, proof.fri_proof, bound)?;
7385

7486
// Verify proof of work.
75-
7687
if !channel.verify_pow_nonce(self.config.pow_bits, proof.proof_of_work) {
7788
return Err(VerificationError::ProofOfWork);
7889
}
7990
channel.mix_u64(proof.proof_of_work);
8091
// Get FRI query positions.
8192
let query_positions = fri_verifier.sample_query_positions(channel);
82-
// Verify merkle decommitments.
93+
let preprocessed_query_positions = prepare_preprocessed_query_positions(
94+
&query_positions,
95+
lifting_log_size,
96+
self.column_log_sizes()[0]
97+
.iter()
98+
.max()
99+
.copied()
100+
.unwrap_or_default(),
101+
);
102+
103+
// Build the query positions tree: the preprocessed tree needs a different treatment than
104+
// the other trees.
105+
let query_positions_tree = TreeVec::new(
106+
self.trees
107+
.iter()
108+
.enumerate()
109+
.map(|(i, _)| {
110+
if i == 0 {
111+
preprocessed_query_positions.as_slice()
112+
} else {
113+
query_positions.as_slice()
114+
}
115+
})
116+
.collect::<Vec<_>>(),
117+
);
118+
// Verify decommitments.
83119
self.trees
84120
.as_ref()
85121
.zip_eq(proof.decommitments)
86122
.zip_eq(proof.queried_values.clone())
87-
.map(|((tree, decommitment), queried_values)| {
88-
tree.verify(&query_positions, queried_values, decommitment)
89-
})
123+
.zip_eq(query_positions_tree)
124+
.map(
125+
|(((tree, decommitment), queried_values), query_positions)| {
126+
tree.verify(query_positions, queried_values, decommitment)
127+
},
128+
)
90129
.0
91130
.into_iter()
92131
.collect::<Result<(), _>>()?;

crates/stwo/src/core/vcs_lifted/verifier.rs

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -103,6 +103,7 @@ impl<H: MerkleHasherLifted> MerkleVerifierLifted<H> {
103103
let mut prev_layer_hashes: Vec<(usize, H::Hash)> = query_positions
104104
.iter()
105105
.zip_eq(queried_values.chunks_exact(self.column_log_sizes.len()))
106+
.dedup_by(|(idx, _), (idx2, _)| idx == idx2)
106107
.map(|(idx, column_values)| {
107108
let mut hasher = H::default_with_initial_state();
108109
hasher.update_leaf(column_values);

crates/stwo/src/prover/pcs/mod.rs

Lines changed: 23 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -12,6 +12,7 @@ use crate::core::fields::qm31::SecureField;
1212
use crate::core::pcs::quotients::{
1313
CommitmentSchemeProof, CommitmentSchemeProofAux, ExtendedCommitmentSchemeProof, PointSample,
1414
};
15+
use crate::core::pcs::utils::prepare_preprocessed_query_positions;
1516
use crate::core::pcs::{PcsConfig, TreeSubspan, TreeVec};
1617
use crate::core::poly::circle::CanonicCoset;
1718
use crate::core::vcs_lifted::merkle_hasher::MerkleHasherLifted;
@@ -199,13 +200,30 @@ impl<'a, B: BackendForChannel<MC>, MC: MerkleChannel> CommitmentSchemeProver<'a,
199200
unsorted_query_locations,
200201
} = fri_prover.decommit(channel);
201202

202-
// Decommit the FRI queries on the merkle trees.
203-
let decommitment_results = self
203+
// Build the query position tree.
204+
let preprocessed_query_positions = prepare_preprocessed_query_positions(
205+
&query_positions,
206+
max_log_size,
207+
self.trees[0].commitment.layers.len() as u32 - 1,
208+
);
209+
let query_positions_tree = TreeVec::new(
210+
self.trees
211+
.iter()
212+
.enumerate()
213+
.map(|(i, _)| {
214+
if i == 0 {
215+
preprocessed_query_positions.as_slice()
216+
} else {
217+
query_positions.as_slice()
218+
}
219+
})
220+
.collect::<Vec<_>>(),
221+
);
222+
let (queried_values, decommitments, aux): (Vec<_>, Vec<_>, Vec<_>) = self
204223
.trees
205224
.as_ref()
206-
.map(|tree| tree.decommit(&query_positions));
207-
208-
let (queried_values, decommitments, aux): (Vec<_>, Vec<_>, Vec<_>) = decommitment_results
225+
.zip_eq(query_positions_tree)
226+
.map(|(tree, query_positions)| tree.decommit(query_positions))
209227
.0
210228
.into_iter()
211229
.map(|(v, x)| (v, x.decommitment, x.aux))

crates/stwo/src/prover/vcs_lifted/prover.rs

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -89,6 +89,7 @@ impl<B: MerkleOpsLifted<H>, H: MerkleHasherLifted> MerkleProverLifted<B, H> {
8989
}
9090

9191
let mut prev_layer_queries = queries_position.to_vec();
92+
prev_layer_queries.dedup();
9293
// The largest log size of a layer is equal to `self.layers.len() - 1`. We start iterating
9394
// from the layer of log size `self.layers.len() - 2` so that we always have a previous
9495
// layer available for the computation.

0 commit comments

Comments (0)