Skip to content

Commit bfe5b34

Browse files
committed
daphne: Add support for the Mastic VDAF
Mastic (https://datatracker.ietf.org/doc/draft-mouris-cfrg-mastic/) is a VDAF that enables a richer set of functionalities than the VDAFs we support so far. The `daphne::vdaf::mastic` module contains a "dummy" version of Mastic intended to exercise the DAP protocol logic we would need in order to fully support this VDAF. The `prio` crate now implements Mastic, so upgrade to a version of the crate that supports it and replace the dummy VDAF with the real one. In addition, to complete aggregation of a report, it is necessary to know the aggregation parameter, which currently is only plumbed to report initialization. In particular, `DapTaskConfig::produce_agg_job_resp()` needs the aggregation parameter from the aggregation job request message. (Likewise, `ToInitializedReportsTransition::with_initialized_reports()` needs the aggregation parameter.) Finally, clean up some API cruft in `daphne::vdaf`: 1. Encapsulate variants of Mastic behind a `MasticConfig` as we've done for other VDAFs. 2. Modify `prep_finish_from_shares()` to not take the aggregator ID. This is a relic of when we supported DAP-02, when this method may have been called by either the Leader or the Helper. Now it's always called by the Helper. 3. Implement state encoding for Mastic, as required by the new async Helper implementation. 4. Generalize the `prep_init()` function in `daphne::prio3` to be used for Mastic as well.
1 parent dac3bcc commit bfe5b34

File tree

14 files changed

+328
-389
lines changed

14 files changed

+328
-389
lines changed

Cargo.lock

Lines changed: 1 addition & 2 deletions
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

Cargo.toml

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -63,7 +63,8 @@ matchit = "0.7.3"
6363
p256 = { version = "0.13.2", features = ["ecdsa-core", "ecdsa", "pem"] }
6464
paste = "1.0.15"
6565
prio_draft09 = { package = "prio", version = "0.16.7" }
66-
prio = { git = "https://github.com/divviup/libprio-rs.git", rev = "c50bb9a47b396ad6a08a3fec36b98bcc2d9217a1" }
66+
# TODO Point to version `0.17.0` once released. This revision is one commit ahead of `0.17.0-alpha.0`.
67+
prio = { git = "https://github.com/divviup/libprio-rs.git", rev = "e5e8a47ee4567f7588d0b5c8d20f75dde4061b2f" }
6768
prometheus = "0.13.4"
6869
rand = "0.8.5"
6970
rayon = "1.10.0"

crates/daphne-worker/src/aggregator/router/helper.rs

Lines changed: 10 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -11,18 +11,19 @@ use axum::{
1111
routing::{post, put},
1212
};
1313
use daphne::{
14+
error::DapAbort,
1415
fatal_error,
1516
hpke::HpkeProvider,
1617
messages::AggregateShareReq,
1718
roles::{
1819
helper::{self, HashedAggregationJobReq},
1920
DapAggregator, DapHelper,
2021
},
21-
DapError, DapResponse,
22+
DapAggregationParam, DapError, DapResponse,
2223
};
2324
use daphne_service_utils::compute_offload;
2425
use http::StatusCode;
25-
use prio::codec::ParameterizedEncode;
26+
use prio::codec::{ParameterizedDecode, ParameterizedEncode};
2627
use std::{borrow::Cow, sync::Arc};
2728

2829
pub(super) fn add_helper_routes(router: super::Router<App>) -> super::Router<App> {
@@ -60,6 +61,12 @@ async fn agg_job(
6061

6162
let hpke_receiver_configs = app.get_hpke_receiver_configs(req.version).await?;
6263

64+
let agg_param = DapAggregationParam::get_decoded_with_param(
65+
&transition.task_config.vdaf,
66+
&req.payload.agg_param,
67+
)
68+
.map_err(|e| DapAbort::from_codec_error(e, req.task_id))?;
69+
6370
let initialized_reports: compute_offload::InitializedReports = app
6471
.compute_offload
6572
.compute(
@@ -77,7 +84,7 @@ async fn agg_job(
7784
.map_err(|e| fatal_error!(err = ?e, "failed to offload report initialization"))?;
7885

7986
transition
80-
.with_initialized_reports(initialized_reports.reports)
87+
.with_initialized_reports(agg_param, initialized_reports.reports)
8188
.finish_and_aggregate(&*app)
8289
.await
8390
}

crates/daphne/src/hpke.rs

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -505,7 +505,7 @@ pub mod info_and_aad {
505505
}
506506
}
507507

508-
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
508+
#[derive(Debug, Clone, Copy, PartialEq)]
509509
pub struct AggregateShare<'s> {
510510
// info
511511
pub version: DapVersion,

crates/daphne/src/lib.rs

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -70,7 +70,7 @@ use error::FatalDapError;
7070
use hpke::{HpkeConfig, HpkeKemId};
7171
use messages::taskprov::TaskprovAdvertisement;
7272
#[cfg(feature = "experimental")]
73-
use prio::{codec::Decode, vdaf::poplar1::Poplar1AggregationParam};
73+
use prio::{codec::Decode, vdaf::mastic::MasticAggregationParam};
7474
use prio::{
7575
codec::{CodecError, Encode, ParameterizedDecode},
7676
vdaf::Aggregatable as AggregatableTrait,
@@ -813,11 +813,11 @@ pub enum DapMeasurement {
813813
}
814814

815815
/// An aggregation parameter.
816-
#[derive(Clone, Debug, Eq, PartialEq)]
816+
#[derive(Clone, Debug, PartialEq)]
817817
pub enum DapAggregationParam {
818818
Empty,
819819
#[cfg(feature = "experimental")]
820-
Mastic(Poplar1AggregationParam),
820+
Mastic(MasticAggregationParam),
821821
}
822822

823823
#[cfg(any(test, feature = "test-utils"))]
@@ -877,7 +877,7 @@ impl ParameterizedDecode<VdafConfig> for DapAggregationParam {
877877
let _ = bytes;
878878
match vdaf_config {
879879
#[cfg(feature = "experimental")]
880-
VdafConfig::Mastic { .. } => Ok(Self::Mastic(Poplar1AggregationParam::decode(bytes)?)),
880+
VdafConfig::Mastic(_) => Ok(Self::Mastic(MasticAggregationParam::decode(bytes)?)),
881881
_ => Ok(Self::Empty),
882882
}
883883
}

crates/daphne/src/protocol/aggregator.rs

Lines changed: 12 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -236,7 +236,13 @@ impl DapTaskConfig {
236236
task_id: &TaskId,
237237
agg_job_init_req: AggregationJobInitReq,
238238
replay_protection: ReplayProtection,
239-
) -> Result<Vec<InitializedReport<WithPeerPrepShare>>, DapError>
239+
) -> Result<
240+
(
241+
DapAggregationParam,
242+
Vec<InitializedReport<WithPeerPrepShare>>,
243+
),
244+
DapError,
245+
>
240246
where
241247
H: HpkeDecrypter + Sync,
242248
{
@@ -260,7 +266,7 @@ impl DapTaskConfig {
260266
agg_job_init_req.prep_inits.len()
261267
);
262268

263-
agg_job_init_req
269+
let initialized_reports = agg_job_init_req
264270
.prep_inits
265271
.into_par_iter()
266272
.map(|prep_init| {
@@ -274,7 +280,8 @@ impl DapTaskConfig {
274280
&agg_param,
275281
)
276282
})
277-
.collect()
283+
.collect::<Result<Vec<_>, _>>()?;
284+
Ok((agg_param, initialized_reports))
278285
}
279286

280287
/// Helper -> Leader: Produce the `AggregationJobResp` message to send to the Leader and
@@ -283,6 +290,7 @@ impl DapTaskConfig {
283290
pub(crate) fn produce_agg_job_resp(
284291
&self,
285292
task_id: TaskId,
293+
agg_param: &DapAggregationParam,
286294
report_status: &HashMap<ReportId, ReportProcessedStatus>,
287295
part_batch_sel: &PartialBatchSelector,
288296
initialized_reports: &[InitializedReport<WithPeerPrepShare>],
@@ -305,8 +313,8 @@ impl DapTaskConfig {
305313
} => {
306314
let res = self.vdaf.prep_finish_from_shares(
307315
self.version,
308-
1,
309316
task_id,
317+
agg_param,
310318
helper_prep_state.clone(),
311319
helper_prep_share.clone(),
312320
leader_prep_share,

crates/daphne/src/roles/helper/handle_agg_job.rs

Lines changed: 8 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -11,7 +11,7 @@ use crate::{
1111
metrics::ReportStatus,
1212
protocol::aggregator::ReportProcessedStatus,
1313
roles::{aggregator::MergeAggShareError, resolve_task_config},
14-
DapError, DapRequest, DapTaskConfig, InitializedReport, WithPeerPrepShare,
14+
DapAggregationParam, DapError, DapRequest, DapTaskConfig, InitializedReport, WithPeerPrepShare,
1515
};
1616
use std::{collections::HashMap, sync::Once};
1717

@@ -48,6 +48,7 @@ pub struct ToInitializedReportsTransition {
4848
/// The reports have been initialized and are ready for aggregation.
4949
pub struct InitializedReports {
5050
task_id: TaskId,
51+
agg_param: DapAggregationParam,
5152
part_batch_sel: PartialBatchSelector,
5253
task_config: DapTaskConfig,
5354
reports: Vec<InitializedReport<WithPeerPrepShare>>,
@@ -142,7 +143,7 @@ impl HandleAggJob<WithTaskConfig> {
142143
} = self.state;
143144
let task_id = request.task_id;
144145
let part_batch_sel = request.payload.part_batch_sel.clone();
145-
let initialized_reports = task_config.consume_agg_job_req(
146+
let (agg_param, initialized_reports) = task_config.consume_agg_job_req(
146147
&aggregator
147148
.get_hpke_receiver_configs(task_config.version)
148149
.await?,
@@ -155,6 +156,7 @@ impl HandleAggJob<WithTaskConfig> {
155156
Ok(HandleAggJob {
156157
state: InitializedReports {
157158
task_id,
159+
agg_param,
158160
task_config,
159161
part_batch_sel,
160162
reports: initialized_reports,
@@ -207,6 +209,7 @@ impl ToInitializedReportsTransition {
207209
/// Provide the initialized reports that should be aggregated.
208210
pub fn with_initialized_reports(
209211
self,
212+
agg_param: DapAggregationParam,
210213
reports: Vec<InitializedReport<WithPeerPrepShare>>,
211214
) -> HandleAggJob<InitializedReports> {
212215
let Self {
@@ -217,6 +220,7 @@ impl ToInitializedReportsTransition {
217220
HandleAggJob {
218221
state: InitializedReports {
219222
task_id,
223+
agg_param,
220224
part_batch_sel,
221225
task_config,
222226
reports,
@@ -236,6 +240,7 @@ impl HandleAggJob<InitializedReports> {
236240
state:
237241
InitializedReports {
238242
task_id,
243+
agg_param,
239244
part_batch_sel,
240245
task_config,
241246
reports,
@@ -257,6 +262,7 @@ impl HandleAggJob<InitializedReports> {
257262
for _ in 0..RETRY_COUNT {
258263
let (agg_span, agg_job_resp) = task_config.produce_agg_job_resp(
259264
task_id,
265+
&agg_param,
260266
&report_status,
261267
&part_batch_sel,
262268
&reports,

crates/daphne/src/roles/mod.rs

Lines changed: 15 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -130,7 +130,7 @@ async fn resolve_task_config(
130130
mod test {
131131
use super::{aggregator, helper, leader, DapLeader};
132132
#[cfg(feature = "experimental")]
133-
use crate::vdaf::{mastic::MasticWeight, MasticWeightConfig};
133+
use crate::vdaf::mastic::{MasticConfig, MasticWeight, MasticWeightConfig};
134134
use crate::{
135135
assert_metrics_include, async_test_versions,
136136
constants::DapMediaType,
@@ -151,7 +151,7 @@ mod test {
151151
use assert_matches::assert_matches;
152152
use prio::codec::{Encode, ParameterizedDecode};
153153
#[cfg(feature = "experimental")]
154-
use prio::{idpf::IdpfInput, vdaf::poplar1::Poplar1AggregationParam};
154+
use prio::idpf::IdpfInput;
155155
use rand::{thread_rng, Rng};
156156
use std::{
157157
collections::HashMap,
@@ -275,10 +275,10 @@ mod test {
275275

276276
#[cfg(feature = "experimental")]
277277
{
278-
let mastic = VdafConfig::Mastic {
279-
input_size: 1,
278+
let mastic = VdafConfig::Mastic(MasticConfig {
279+
bits: 8,
280280
weight_config: MasticWeightConfig::Count,
281-
};
281+
});
282282
tasks.insert(
283283
mastic_task_id,
284284
DapTaskConfig {
@@ -2018,6 +2018,8 @@ mod test {
20182018
#[cfg(feature = "experimental")]
20192019
#[tokio::test]
20202020
async fn mastic() {
2021+
use prio::vdaf::mastic::MasticAggregationParam;
2022+
20212023
let t = Test::new(DapVersion::Latest);
20222024
let task_id = &t.mastic_task_id;
20232025
let task_config = t
@@ -2043,11 +2045,14 @@ mod test {
20432045
// Collector: Request result from the Leader.
20442046
let query = task_config.query_for_current_batch_window(t.now);
20452047
let agg_param = DapAggregationParam::Mastic(
2046-
Poplar1AggregationParam::try_from_prefixes(vec![
2047-
IdpfInput::from_bytes(&[0]),
2048-
IdpfInput::from_bytes(&[1]),
2049-
IdpfInput::from_bytes(&[7]),
2050-
])
2048+
MasticAggregationParam::new(
2049+
vec![
2050+
IdpfInput::from_bytes(&[0]),
2051+
IdpfInput::from_bytes(&[1]),
2052+
IdpfInput::from_bytes(&[7]),
2053+
],
2054+
true,
2055+
)
20512056
.unwrap(),
20522057
);
20532058
leader::handle_coll_job_req(

crates/daphne/src/testing/mod.rs

Lines changed: 15 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -209,22 +209,26 @@ impl AggregationJobTest {
209209
&self,
210210
agg_job_init_req: AggregationJobInitReq,
211211
) -> (DapAggregateSpan<DapAggregateShare>, AggregationJobResp) {
212+
let part_batch_sel = agg_job_init_req.part_batch_sel.clone();
213+
let (agg_param, initialized_reports) = self
214+
.task_config
215+
.consume_agg_job_req(
216+
&self.helper_hpke_receiver_config,
217+
self.valid_report_time_range(),
218+
&self.task_id,
219+
agg_job_init_req,
220+
self.replay_protection,
221+
)
222+
.unwrap();
223+
212224
let (span, resp) = self
213225
.task_config
214226
.produce_agg_job_resp(
215227
self.task_id,
228+
&agg_param,
216229
&HashMap::default(),
217-
&agg_job_init_req.part_batch_sel.clone(),
218-
&self
219-
.task_config
220-
.consume_agg_job_req(
221-
&self.helper_hpke_receiver_config,
222-
self.valid_report_time_range(),
223-
&self.task_id,
224-
agg_job_init_req,
225-
self.replay_protection,
226-
)
227-
.unwrap(),
230+
&part_batch_sel,
231+
&initialized_reports,
228232
)
229233
.unwrap();
230234
(span, resp.into())

crates/daphne/src/vdaf/draft09.rs

Lines changed: 2 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -97,26 +97,18 @@ where
9797

9898
pub(crate) fn prep_finish_from_shares<V, const VERIFY_KEY_SIZE: usize, const NONCE_SIZE: usize>(
9999
vdaf: &V,
100-
agg_id: usize,
101100
host_state: V::PrepareState,
102101
host_share: V::PrepareShare,
103102
peer_share_data: &[u8],
104103
) -> Result<(V::OutputShare, Vec<u8>), VdafError>
105104
where
106105
V: Vdaf<AggregationParam = ()> + Aggregator<VERIFY_KEY_SIZE, NONCE_SIZE>,
107106
{
108-
// Decode the Helper's inbound message.
107+
// Decode the peer's inbound message.
109108
let peer_share = V::PrepareShare::get_decoded_with_param(&host_state, peer_share_data)?;
110109

111110
// Preprocess the inbound messages.
112-
let message = vdaf.prepare_shares_to_prepare_message(
113-
&(),
114-
if agg_id == 0 {
115-
[host_share, peer_share]
116-
} else {
117-
[peer_share, host_share]
118-
},
119-
)?;
111+
let message = vdaf.prepare_shares_to_prepare_message(&(), [peer_share, host_share])?;
120112
let message_data = message.get_encoded()?;
121113

122114
// Compute the host's output share.

0 commit comments

Comments
 (0)