Replace generate_* functions with methods of HpkeKeypair
inahga committed Jul 9, 2024
1 parent 4bd3236 commit 3fe8cbb
Showing 22 changed files with 152 additions and 273 deletions.
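
In short, this commit folds the free HPKE helper functions in janus_core::hpke (and its test_util module) into associated functions on HpkeKeypair. A minimal sketch of the call-site migration, inferred from the hunks below (id, cs, kem, kdf, and aead are placeholder bindings; the full argument list of HpkeKeypair::generate is an assumption, since the key_rotator.rs hunk truncates it):

    // Call-site migration sketch (placeholder bindings; not tied to a specific test).
    let keypair = HpkeKeypair::test();                        // was: generate_test_hpke_config_and_private_key()
    let keypair = HpkeKeypair::test_with_id(id);              // was: generate_test_hpke_config_and_private_key_with_id(id)
    let keypair = HpkeKeypair::test_with_ciphersuite(id, cs); // was: generate_test_hpke_config_and_private_key_with_id_and_ciphersuite(id, cs)
    let keypair = HpkeKeypair::generate(id, kem, kdf, aead);  // was: generate_hpke_config_and_private_key(id, kem, kdf, aead)
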
16 changes: 5 additions & 11 deletions aggregator/src/aggregator.rs
@@ -3456,10 +3456,7 @@ mod tests {
test_util::noop_meter,
};
use janus_core::{
hpke::{
self, test_util::generate_test_hpke_config_and_private_key_with_id,
HpkeApplicationInfo, HpkeKeypair, Label,
},
hpke::{self, HpkeApplicationInfo, HpkeKeypair, Label},
test_util::{
install_test_trace_subscriber,
runtime::{TestRuntime, TestRuntimeManager},
@@ -3966,11 +3963,10 @@ mod tests {
let leader_task = task.leader_view().unwrap();

// Same ID as the task to test having both keys to choose from.
let global_hpke_keypair_same_id = generate_test_hpke_config_and_private_key_with_id(
(*leader_task.current_hpke_key().config().id()).into(),
);
let global_hpke_keypair_same_id =
HpkeKeypair::test_with_id((*leader_task.current_hpke_key().config().id()).into());
// Different ID to test misses on the task key.
let global_hpke_keypair_different_id = generate_test_hpke_config_and_private_key_with_id(
let global_hpke_keypair_different_id = HpkeKeypair::test_with_id(
(0..)
.map(HpkeConfigId::from)
.find(|id| !leader_task.hpke_keys().contains_key(id))
@@ -4158,9 +4154,7 @@
&task,
clock.now(),
random(),
&generate_test_hpke_config_and_private_key_with_id(
(*task.current_hpke_key().config().id()).into(),
),
&HpkeKeypair::test_with_id((*task.current_hpke_key().config().id()).into()),
);

// Try to upload the report, verify that we get the expected error.
22 changes: 11 additions & 11 deletions aggregator/src/aggregator/aggregation_job_creator.rs
@@ -923,7 +923,7 @@ mod tests {
test_util::noop_meter,
};
use janus_core::{
hpke::test_util::generate_test_hpke_config_and_private_key,
hpke::HpkeKeypair,
test_util::{install_test_trace_subscriber, run_vdaf},
time::{Clock, DurationExt, IntervalExt, MockClock, TimeExt},
vdaf::{VdafInstance, VERIFY_KEY_LENGTH},
@@ -978,7 +978,7 @@ mod tests {
let batch_identifier =
TimeInterval::to_batch_identifier(&leader_task, &(), &report_time).unwrap();
let vdaf = Arc::new(Prio3::new_count(2).unwrap());
let helper_hpke_keypair = generate_test_hpke_config_and_private_key();
let helper_hpke_keypair = HpkeKeypair::test();
let leader_report_metadata = ReportMetadata::new(random(), report_time);
let leader_transcript = run_vdaf(
vdaf.as_ref(),
@@ -1160,7 +1160,7 @@ mod tests {
// batches shouldn't have any aggregation jobs in common since we can fill our aggregation
// jobs without overlap.
let vdaf = Arc::new(Prio3::new_count(2).unwrap());
let helper_hpke_keypair = generate_test_hpke_config_and_private_key();
let helper_hpke_keypair = HpkeKeypair::test();

let first_report_time = clock.now();
let second_report_time = clock.now().add(task.time_precision()).unwrap();
@@ -1345,7 +1345,7 @@ mod tests {
let report_time = clock.now();
let batch_identifier = TimeInterval::to_batch_identifier(&task, &(), &report_time).unwrap();
let vdaf = Arc::new(Prio3::new_count(2).unwrap());
let helper_hpke_keypair = generate_test_hpke_config_and_private_key();
let helper_hpke_keypair = HpkeKeypair::test();

let first_report_metadata = ReportMetadata::new(random(), report_time);
let first_transcript = run_vdaf(
@@ -1555,7 +1555,7 @@ mod tests {
// Create a min-size batch.
let report_time = clock.now();
let vdaf = Arc::new(Prio3::new_count(2).unwrap());
let helper_hpke_keypair = generate_test_hpke_config_and_private_key();
let helper_hpke_keypair = HpkeKeypair::test();
let batch_identifier = TimeInterval::to_batch_identifier(&task, &(), &report_time).unwrap();
let reports: Arc<Vec<_>> = Arc::new(
iter::repeat_with(|| {
@@ -1742,7 +1742,7 @@ mod tests {
// containing these reports.
let report_time = clock.now();
let vdaf = Arc::new(Prio3::new_count(2).unwrap());
let helper_hpke_keypair = generate_test_hpke_config_and_private_key();
let helper_hpke_keypair = HpkeKeypair::test();
let reports: Arc<Vec<_>> = Arc::new(
iter::repeat_with(|| {
let report_metadata = ReportMetadata::new(random(), report_time);
@@ -1964,7 +1964,7 @@ mod tests {
// the reports should remain "unaggregated".
let report_time = clock.now();
let vdaf = Arc::new(Prio3::new_count(2).unwrap());
let helper_hpke_keypair = generate_test_hpke_config_and_private_key();
let helper_hpke_keypair = HpkeKeypair::test();
let reports: Arc<Vec<_>> = Arc::new(
iter::repeat_with(|| {
let report_metadata = ReportMetadata::new(random(), report_time);
@@ -2128,7 +2128,7 @@ mod tests {
// of reports for the second batch.
let report_time = clock.now();
let vdaf = Arc::new(Prio3::new_count(2).unwrap());
let helper_hpke_keypair = generate_test_hpke_config_and_private_key();
let helper_hpke_keypair = HpkeKeypair::test();
let reports: Arc<Vec<_>> = Arc::new(
iter::repeat_with(|| {
let report_metadata = ReportMetadata::new(random(), report_time);
@@ -2392,7 +2392,7 @@ mod tests {
// job with the remainder of the reports.
let report_time = clock.now();
let vdaf = Arc::new(Prio3::new_count(2).unwrap());
let helper_hpke_keypair = generate_test_hpke_config_and_private_key();
let helper_hpke_keypair = HpkeKeypair::test();
let reports: Arc<Vec<_>> = Arc::new(
iter::repeat_with(|| {
let report_metadata = ReportMetadata::new(random(), report_time);
@@ -2665,7 +2665,7 @@ mod tests {
let report_time_1 = clock.now().sub(&batch_time_window_size).unwrap();
let report_time_2 = clock.now();
let vdaf = Arc::new(Prio3::new_count(2).unwrap());
let helper_hpke_keypair = generate_test_hpke_config_and_private_key();
let helper_hpke_keypair = HpkeKeypair::test();

let mut reports = Vec::new();
reports.extend(
@@ -2979,7 +2979,7 @@ mod tests {
// aggregation jobs to be created containing all these reports, but only two batches.
let report_time = clock.now();
let vdaf = Arc::new(Prio3::new_count(2).unwrap());
let helper_hpke_keypair = generate_test_hpke_config_and_private_key();
let helper_hpke_keypair = HpkeKeypair::test();
let reports: Arc<Vec<_>> = Arc::new(
iter::repeat_with(|| {
let report_metadata = ReportMetadata::new(random(), report_time);
24 changes: 12 additions & 12 deletions aggregator/src/aggregator/aggregation_job_driver/tests.rs
@@ -23,7 +23,7 @@ use janus_aggregator_core::{
test_util::noop_meter,
};
use janus_core::{
hpke::test_util::generate_test_hpke_config_and_private_key,
hpke::HpkeKeypair,
report_id::ReportIdChecksumExt,
retries::test_util::LimitedRetryer,
test_util::{install_test_trace_subscriber, run_vdaf, runtime::TestRuntimeManager},
@@ -97,7 +97,7 @@ async fn aggregation_job_driver() {
);

let agg_auth_token = task.aggregator_auth_token().clone();
let helper_hpke_keypair = generate_test_hpke_config_and_private_key();
let helper_hpke_keypair = HpkeKeypair::test();
let report = LeaderStoredReport::generate(
*task.id(),
report_metadata,
@@ -375,7 +375,7 @@ async fn step_time_interval_aggregation_job_init_single_step() {
);

let agg_auth_token = task.aggregator_auth_token();
let helper_hpke_keypair = generate_test_hpke_config_and_private_key();
let helper_hpke_keypair = HpkeKeypair::test();
let report = LeaderStoredReport::generate(
*task.id(),
report_metadata,
@@ -704,7 +704,7 @@ async fn step_time_interval_aggregation_job_init_two_steps() {
);

let agg_auth_token = task.aggregator_auth_token();
let helper_hpke_keypair = generate_test_hpke_config_and_private_key();
let helper_hpke_keypair = HpkeKeypair::test();
let report = LeaderStoredReport::generate(
*task.id(),
report_metadata,
@@ -983,7 +983,7 @@ async fn step_time_interval_aggregation_job_init_partially_garbage_collected() {
);

let agg_auth_token = task.aggregator_auth_token();
let helper_hpke_keypair = generate_test_hpke_config_and_private_key();
let helper_hpke_keypair = HpkeKeypair::test();
let gc_eligible_report = LeaderStoredReport::generate(
*task.id(),
gc_eligible_report_metadata,
@@ -1325,7 +1325,7 @@ async fn step_fixed_size_aggregation_job_init_single_step() {
);

let agg_auth_token = task.aggregator_auth_token();
let helper_hpke_keypair = generate_test_hpke_config_and_private_key();
let helper_hpke_keypair = HpkeKeypair::test();
let report = LeaderStoredReport::generate(
*task.id(),
report_metadata,
@@ -1608,7 +1608,7 @@ async fn step_fixed_size_aggregation_job_init_two_steps() {
);

let agg_auth_token = task.aggregator_auth_token();
let helper_hpke_keypair = generate_test_hpke_config_and_private_key();
let helper_hpke_keypair = HpkeKeypair::test();
let report = LeaderStoredReport::generate(
*task.id(),
report_metadata,
@@ -1872,7 +1872,7 @@ async fn step_time_interval_aggregation_job_continue() {
);

let agg_auth_token = task.aggregator_auth_token();
let helper_hpke_keypair = generate_test_hpke_config_and_private_key();
let helper_hpke_keypair = HpkeKeypair::test();
let report = LeaderStoredReport::generate(
*task.id(),
report_metadata,
@@ -2207,7 +2207,7 @@ async fn step_fixed_size_aggregation_job_continue() {
);

let agg_auth_token = task.aggregator_auth_token();
let helper_hpke_keypair = generate_test_hpke_config_and_private_key();
let helper_hpke_keypair = HpkeKeypair::test();
let report = LeaderStoredReport::generate(
*task.id(),
report_metadata,
@@ -2501,7 +2501,7 @@ async fn setup_cancel_aggregation_job_test() -> CancelAggregationJobTestCase {
&false,
);

let helper_hpke_keypair = generate_test_hpke_config_and_private_key();
let helper_hpke_keypair = HpkeKeypair::test();
let report = LeaderStoredReport::generate(
*task.id(),
report_metadata,
@@ -2750,7 +2750,7 @@ async fn abandon_failing_aggregation_job_with_retryable_error() {
let aggregation_job_id = random();
let verify_key: VerifyKey<VERIFY_KEY_LENGTH> = task.vdaf_verify_key().unwrap();

let helper_hpke_keypair = generate_test_hpke_config_and_private_key();
let helper_hpke_keypair = HpkeKeypair::test();

let vdaf = Prio3::new_count(2).unwrap();
let time = clock
@@ -2991,7 +2991,7 @@ async fn abandon_failing_aggregation_job_with_fatal_error() {
let aggregation_job_id = random();
let verify_key: VerifyKey<VERIFY_KEY_LENGTH> = task.vdaf_verify_key().unwrap();

let helper_hpke_keypair = generate_test_hpke_config_and_private_key();
let helper_hpke_keypair = HpkeKeypair::test();

let vdaf = Prio3::new_count(2).unwrap();
let time = clock
@@ -20,10 +20,7 @@ use janus_aggregator_core::{
};
use janus_core::{
auth_tokens::AuthenticationToken,
hpke::test_util::{
generate_test_hpke_config_and_private_key,
generate_test_hpke_config_and_private_key_with_id,
},
hpke::HpkeKeypair,
report_id::ReportIdChecksumExt,
test_util::{run_vdaf, runtime::TestRuntime},
time::{Clock, MockClock, TimeExt},
@@ -242,7 +239,7 @@ async fn aggregate_init() {
let (prepare_init_3, transcript_3) = prep_init_generator.next(&measurement);

let wrong_hpke_config = loop {
let hpke_config = generate_test_hpke_config_and_private_key().config().clone();
let hpke_config = HpkeKeypair::test().config().clone();
if helper_task.hpke_keys().contains_key(hpke_config.id()) {
continue;
}
@@ -703,11 +700,10 @@ async fn aggregate_init_with_reports_encrypted_by_global_key() {

// Insert some global HPKE keys.
// Same ID as the task to test having both keys to choose from.
let global_hpke_keypair_same_id = generate_test_hpke_config_and_private_key_with_id(
(*helper_task.current_hpke_key().config().id()).into(),
);
let global_hpke_keypair_same_id =
HpkeKeypair::test_with_id((*helper_task.current_hpke_key().config().id()).into());
// Different ID to test misses on the task key.
let global_hpke_keypair_different_id = generate_test_hpke_config_and_private_key_with_id(
let global_hpke_keypair_different_id = HpkeKeypair::test_with_id(
(0..)
.map(HpkeConfigId::from)
.find(|id| !helper_task.hpke_keys().contains_key(id))
13 changes: 5 additions & 8 deletions aggregator/src/aggregator/http_handlers/tests/hpke_config.rs
@@ -17,10 +17,7 @@ use janus_aggregator_core::{
test_util::noop_meter,
};
use janus_core::{
hpke::{
self, test_util::generate_test_hpke_config_and_private_key_with_id, HpkeApplicationInfo,
HpkeKeypair, Label,
},
hpke::{self, HpkeApplicationInfo, HpkeKeypair, Label},
test_util::runtime::TestRuntime,
vdaf::VdafInstance,
};
@@ -99,7 +96,7 @@ async fn global_hpke_config() {

// Insert an HPKE config, i.e. start the application with a keypair already
// in the database.
let first_hpke_keypair = generate_test_hpke_config_and_private_key_with_id(1);
let first_hpke_keypair = HpkeKeypair::test_with_id(1);
datastore
.run_unnamed_tx(|tx| {
let keypair = first_hpke_keypair.clone();
@@ -148,7 +145,7 @@ async fn global_hpke_config() {
check_hpke_config_is_usable(&hpke_config_list, &first_hpke_keypair);

// Insert an inactive HPKE config.
let second_hpke_keypair = generate_test_hpke_config_and_private_key_with_id(2);
let second_hpke_keypair = HpkeKeypair::test_with_id(2);
datastore
.run_unnamed_tx(|tx| {
let keypair = second_hpke_keypair.clone();
@@ -239,7 +236,7 @@ async fn global_hpke_config_with_taskprov() {

// Insert an HPKE config, i.e. start the application with a keypair already
// in the database.
let first_hpke_keypair = generate_test_hpke_config_and_private_key_with_id(1);
let first_hpke_keypair = HpkeKeypair::test_with_id(1);
datastore
.run_unnamed_tx(|tx| {
let keypair = first_hpke_keypair.clone();
@@ -383,7 +380,7 @@ async fn require_global_hpke_keys() {

// Insert an HPKE config, i.e. start the application with a keypair already
// in the database.
let keypair = generate_test_hpke_config_and_private_key_with_id(1);
let keypair = HpkeKeypair::test_with_id(1);
datastore
.run_unnamed_tx(|tx| {
let keypair = keypair.clone();
9 changes: 2 additions & 7 deletions aggregator/src/aggregator/http_handlers/tests/report.rs
@@ -16,10 +16,7 @@ use janus_aggregator_core::{
test_util::noop_meter,
};
use janus_core::{
hpke::{
self, test_util::generate_test_hpke_config_and_private_key_with_id, HpkeApplicationInfo,
Label,
},
hpke::{self, HpkeApplicationInfo, HpkeKeypair, Label},
test_util::{install_test_trace_subscriber, runtime::TestRuntime},
time::{Clock, DurationExt, MockClock, TimeExt},
vdaf::VdafInstance,
@@ -263,9 +260,7 @@ async fn upload_handler() {
clock.now(),
*accepted_report_id,
// Encrypt report with some arbitrary key that has the same ID as an existing one.
&generate_test_hpke_config_and_private_key_with_id(
(*leader_task.current_hpke_key().config().id()).into(),
),
&HpkeKeypair::test_with_id((*leader_task.current_hpke_key().config().id()).into()),
);
let mut test_conn = put(task.report_upload_uri().unwrap().path())
.with_request_header(KnownHeaderName::ContentType, Report::MEDIA_TYPE)
14 changes: 4 additions & 10 deletions aggregator/src/aggregator/key_rotator.rs
@@ -14,7 +14,7 @@ use janus_aggregator_core::datastore::{
Datastore, Error as DatastoreError, Transaction,
};
use janus_core::{
hpke::{generate_hpke_config_and_private_key, HpkeCiphersuite},
hpke::{HpkeCiphersuite, HpkeKeypair},
time::{Clock, TimeExt},
};
use janus_messages::{Duration, HpkeAeadId, HpkeConfigId, HpkeKdfId, HpkeKemId, Time};
@@ -341,7 +341,7 @@ impl<'a, C: Clock> HpkeKeyRotator<'a, C> {
let id = self.available_ids.next().ok_or_else(|| {
DatastoreError::User(anyhow!("global HPKE key ID space exhausted").into())
})?;
let keypair = generate_hpke_config_and_private_key(
let keypair = HpkeKeypair::generate(
id,
ciphersuite.kem_id(),
ciphersuite.kdf_id(),
@@ -514,10 +514,7 @@ mod tests {
Datastore,
};
use janus_core::{
hpke::{
test_util::generate_test_hpke_config_and_private_key_with_id_and_ciphersuite,
HpkeCiphersuite,
},
hpke::{HpkeCiphersuite, HpkeKeypair},
test_util::install_test_trace_subscriber,
time::{Clock, DurationExt, MockClock},
};
@@ -657,10 +654,7 @@ mod tests {
(
HpkeConfigId::from(id),
GlobalHpkeKeypair::new(
generate_test_hpke_config_and_private_key_with_id_and_ciphersuite(
id,
HpkeCiphersuite::arbitrary(g),
),
HpkeKeypair::test_with_ciphersuite(id, HpkeCiphersuite::arbitrary(g)),
*g.choose(&[
HpkeKeyState::Pending,
HpkeKeyState::Active,
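
For the non-test path, the key_rotator.rs hunk above shows HpkeKeypair::generate being fed the ciphersuite's KEM and KDF identifiers before the view is cut off. A minimal sketch of the presumed full call; the ciphersuite.aead_id() argument and the fallible return are assumptions:

    // Sketch only: the final argument and the `?` are assumed, mirroring the
    // kem_id()/kdf_id() accessors visible in the hunk above.
    let keypair = HpkeKeypair::generate(
        id,
        ciphersuite.kem_id(),
        ciphersuite.kdf_id(),
        ciphersuite.aead_id(),
    )?;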
