Skip to content

Commit ccccd6f

Browse files
committed
Rename max_log_size to lifting_log_size.
1 parent 5221f96 commit ccccd6f

File tree

4 files changed

+23
-23
lines changed

4 files changed

+23
-23
lines changed

crates/stwo/src/core/pcs/utils.rs

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -177,20 +177,20 @@ impl<T> TreeVec<ColumnVec<Vec<T>>> {
177177

178178
/// Translates FRI query positions from the lifting domain (log-size
/// `lifting_log_size`) into positions on the preprocessed-trace domain
/// (log-size `pp_max_log_size`), preserving the lowest bit of each position.
///
/// NOTE(review): positions appear to be bit-reversed circle-domain indices
/// whose LSB selects the half coset — confirm against the callers in
/// `CommitmentSchemeProver`.
///
/// Returns an empty vector when `pp_max_log_size == 0` (no preprocessed
/// columns to query).
pub fn prepare_preprocessed_query_positions(
    query_positions: &[usize],
    lifting_log_size: u32,
    pp_max_log_size: u32,
) -> Vec<usize> {
    if pp_max_log_size == 0 {
        return vec![];
    };
    if lifting_log_size < pp_max_log_size {
        // Preprocessed domain is strictly larger: scale every position up by
        // the log-size gap while keeping the low bit in place.
        let up_shift = pp_max_log_size - lifting_log_size + 1;
        return query_positions
            .iter()
            .map(|&pos| {
                let low_bit = pos & 1;
                ((pos >> 1) << up_shift) + low_bit
            })
            .collect();
    }
    // Preprocessed domain is smaller or equal: scale every position down by
    // the log-size gap while keeping the low bit in place.
    let down_shift = lifting_log_size - pp_max_log_size + 1;
    query_positions
        .iter()
        .map(|&pos| {
            let low_bit = pos & 1;
            ((pos >> down_shift) << 1) + low_bit
        })
        .collect()
}

crates/stwo/src/prover/backend/cpu/quotients.rs

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -49,26 +49,26 @@ impl QuotientOps for CpuBackend {
4949
fn compute_quotients_and_combine(
5050
accumulations: Vec<AccumulatedNumerators<Self>>,
5151
) -> SecureEvaluation<Self, BitReversedOrder> {
52-
let max_log_size = accumulations
52+
let lifting_log_size = accumulations
5353
.iter()
5454
.map(|x| x.partial_numerators_acc.len())
5555
.max()
5656
.unwrap()
5757
.ilog2();
5858

59-
let domain = CanonicCoset::new(max_log_size).circle_domain();
59+
let domain = CanonicCoset::new(lifting_log_size).circle_domain();
6060
let mut quotients: SecureColumnByCoords<CpuBackend> =
61-
unsafe { SecureColumnByCoords::uninitialized(1 << max_log_size) };
61+
unsafe { SecureColumnByCoords::uninitialized(1 << lifting_log_size) };
6262
let sample_points: Vec<CirclePoint<SecureField>> =
6363
accumulations.iter().map(|x| x.sample_point).collect();
6464
// Populate `quotients`.
6565
for row in 0..quotients.len() {
66-
let domain_point = domain.at(bit_reverse_index(row, max_log_size));
66+
let domain_point = domain.at(bit_reverse_index(row, lifting_log_size));
6767
let inverses = denominator_inverses(&sample_points, domain_point);
6868
let mut quotient = SecureField::zero();
6969
for (acc, den_inv) in accumulations.iter().zip_eq(inverses) {
7070
let mut full_numerator = SecureField::zero();
71-
let log_ratio = max_log_size - acc.partial_numerators_acc.len().ilog2();
71+
let log_ratio = lifting_log_size - acc.partial_numerators_acc.len().ilog2();
7272
let lifted_idx = (row >> (log_ratio + 1) << 1) + (row & 1);
7373

7474
full_numerator += acc.partial_numerators_acc.at(lifted_idx)

crates/stwo/src/prover/backend/simd/quotients.rs

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -74,18 +74,18 @@ impl QuotientOps for SimdBackend {
7474
fn compute_quotients_and_combine(
7575
accumulations: Vec<AccumulatedNumerators<Self>>,
7676
) -> SecureEvaluation<Self, BitReversedOrder> {
77-
let max_log_size = accumulations
77+
let lifting_log_size = accumulations
7878
.iter()
7979
.map(|x| x.partial_numerators_acc.len())
8080
.max()
8181
.unwrap()
8282
.ilog2();
8383

84-
let domain = CanonicCoset::new(max_log_size).circle_domain();
84+
let domain = CanonicCoset::new(lifting_log_size).circle_domain();
8585
let domain_points: Vec<CirclePoint<PackedBaseField>> =
8686
CircleDomainBitRevIterator::new(domain).collect();
8787
let mut quotients: SecureColumnByCoords<SimdBackend> =
88-
unsafe { SecureColumnByCoords::uninitialized(1 << max_log_size) };
88+
unsafe { SecureColumnByCoords::uninitialized(1 << lifting_log_size) };
8989
let sample_points: Vec<CirclePoint<SecureField>> =
9090
accumulations.iter().map(|x| x.sample_point).collect();
9191
let denominators_inverses = denominator_inverses(&sample_points, domain);
@@ -103,7 +103,7 @@ impl QuotientOps for SimdBackend {
103103
for (acc, den_inv) in accumulations.iter().zip_eq(denominators_inverses.iter()) {
104104
let mut full_numerator = PackedSecureField::zero();
105105

106-
let log_ratio = max_log_size - acc.partial_numerators_acc.len().ilog2();
106+
let log_ratio = lifting_log_size - acc.partial_numerators_acc.len().ilog2();
107107
let lifted_partial_numerator =
108108
PackedSecureField::from_packed_m31s(std::array::from_fn(|j| {
109109
let lifted_simd = to_lifted_simd(

crates/stwo/src/prover/pcs/mod.rs

Lines changed: 10 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -101,7 +101,7 @@ impl<'a, B: BackendForChannel<MC>, MC: MerkleChannel> CommitmentSchemeProver<'a,
101101
pub fn build_weights_hash_map(
102102
&self,
103103
sampled_points: &TreeVec<ColumnVec<Vec<CirclePoint<SecureField>>>>,
104-
max_log_size: u32,
104+
lifting_log_size: u32,
105105
) -> WeightsHashMap<B>
106106
where
107107
Col<B, SecureField>: Send + Sync,
@@ -123,17 +123,17 @@ impl<'a, B: BackendForChannel<MC>, MC: MerkleChannel> CommitmentSchemeProver<'a,
123123
let log_size = poly.evals.domain.log_size();
124124
// For each sample point, compute the weights needed to evaluate the polynomial at
125125
// the folded sample point.
126-
// TODO(Leo): the computation `point.repeated_double(max_log_size - log_size)` is
127-
// likely repeated a bunch of times in a typical flat air. Consider moving it
128-
// outside the loop.
126+
// TODO(Leo): the computation `point.repeated_double(lifting_log_size - log_size)`
127+
// is likely repeated a bunch of times in a typical flat air.
128+
// Consider moving it outside the loop.
129129
#[cfg(not(feature = "parallel"))]
130130
points.iter().for_each(|&point| {
131-
compute_weights((log_size, point.repeated_double(max_log_size - log_size)))
131+
compute_weights((log_size, point.repeated_double(lifting_log_size - log_size)))
132132
});
133133

134134
#[cfg(feature = "parallel")]
135135
points.par_iter().for_each(|&point| {
136-
compute_weights((log_size, point.repeated_double(max_log_size - log_size)))
136+
compute_weights((log_size, point.repeated_double(lifting_log_size - log_size)))
137137
});
138138
});
139139

@@ -153,11 +153,11 @@ impl<'a, B: BackendForChannel<MC>, MC: MerkleChannel> CommitmentSchemeProver<'a,
153153
)
154154
.entered();
155155

156-
let max_log_size = self.trees.last().unwrap().commitment.layers.len() as u32 - 1;
156+
let lifting_log_size = self.trees.last().unwrap().commitment.layers.len() as u32 - 1;
157157
let weights_hash_map = if self.store_polynomials_coefficients {
158158
None
159159
} else {
160-
Some(self.build_weights_hash_map(&sampled_points, max_log_size))
160+
Some(self.build_weights_hash_map(&sampled_points, lifting_log_size))
161161
};
162162
let samples: TreeVec<Vec<Vec<PointSample>>> = self
163163
.polynomials()
@@ -168,7 +168,7 @@ impl<'a, B: BackendForChannel<MC>, MC: MerkleChannel> CommitmentSchemeProver<'a,
168168
.map(|&point| PointSample {
169169
point,
170170
value: poly.eval_at_point(
171-
point.repeated_double(max_log_size - poly.evals.domain.log_size()),
171+
point.repeated_double(lifting_log_size - poly.evals.domain.log_size()),
172172
weights_hash_map.as_ref(),
173173
),
174174
})
@@ -209,7 +209,7 @@ impl<'a, B: BackendForChannel<MC>, MC: MerkleChannel> CommitmentSchemeProver<'a,
209209
// Build the query position tree.
210210
let preprocessed_query_positions = prepare_preprocessed_query_positions(
211211
&query_positions,
212-
max_log_size,
212+
lifting_log_size,
213213
self.trees[0].commitment.layers.len() as u32 - 1,
214214
);
215215
let query_positions_tree = TreeVec::new(

0 commit comments

Comments (0)