Add Encryption and Decryption for OPRF Report Structs (#904)
* defining encrypted oprf report:: constants

* encryptedoprfreport compiles

* reports decrypt encrypt

* decryption roundtrip passes

* fmt

* fix clippy

* fix clippy

* fix clippy

* delete comment

* remove crazy trait bounds

* support encrypted report queries

* add test (fails because of quicksort)

* clippy

* fmt

* fix test

* clippy

* let compiler defer length of gen array

* merge

* addressing some comments

* fmt

* offsets

* split OprfReport into OprfReport and OPRFIPAInputRow

* address other comments

* address Alex comments

* address comments

* address comments

* fix CT length

* added error length and validate when generating report from bytes
danielmasny authored Feb 15, 2024
1 parent 030cfc7 commit b526f55
Showing 11 changed files with 801 additions and 161 deletions.
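
At a high level, the commit adds an HPKE encryption path for secret-shared OPRF reports: the report collector encrypts each OprfReport share toward a helper public key, and the helper-side OprfIpaQuery (now constructed with a key registry, per the executor change further down) can presumably decrypt them back into the new OPRFIPAInputRow type that the protocol consumes. A minimal client-side sketch of the new call, based on the playbook change below; report, key_id, and key_registry are assumed to be an already-shared OprfReport, a KeyIdentifier, and a PublicKeyRegistry reference:

    use rand::{rngs::StdRng, SeedableRng};

    // Appends one (presumably length-delimited) ciphertext per report to `buf`.
    let mut rng = StdRng::from_entropy();
    let mut buf = Vec::new();
    report
        .delimited_encrypt_to(key_id, key_registry, &mut rng, &mut buf)
        .unwrap();
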
3 changes: 2 additions & 1 deletion ipa-core/src/bin/report_collector.rs
@@ -290,11 +290,12 @@ async fn ipa(
let mut key_registries = KeyRegistries::default();
let actual = match query_style {
IpaQueryStyle::Oprf => {
playbook_oprf_ipa::<Fp32BitPrime>(
playbook_oprf_ipa::<Fp32BitPrime, _>(
input_rows,
&helper_clients,
query_id,
ipa_query_config,
key_registries.init_from(network),
)
.await
}
55 changes: 43 additions & 12 deletions ipa-core/src/cli/playbook/ipa.rs
@@ -22,7 +22,10 @@ use crate::{
hpke::PublicKeyRegistry,
ipa_test_input,
net::MpcHelperClient,
protocol::{ipa::IPAInputRow, BreakdownKey, MatchKey, QueryId, Timestamp, TriggerValue},
protocol::{
ipa::IPAInputRow, ipa_prf::OPRFIPAInputRow, BreakdownKey, MatchKey, QueryId, Timestamp,
TriggerValue,
},
query::QueryStatus,
report::{KeyIdentifier, OprfReport, Report},
secret_sharing::{replicated::semi_honest::AdditiveShare, IntoShares},
@@ -103,31 +106,59 @@ where
run_query_and_validate::<F>(inputs, query_size, clients, query_id, query_config).await
}

pub async fn playbook_oprf_ipa<F>(
pub async fn playbook_oprf_ipa<F, KR>(
records: Vec<TestRawDataRecord>,
clients: &[MpcHelperClient; 3],
query_id: QueryId,
query_config: IpaQueryConfig,
encryption: Option<(KeyIdentifier, [&KR; 3])>,
) -> IpaQueryResult
where
F: PrimeField,
AdditiveShare<F>: Serializable,
KR: PublicKeyRegistry,
{
let mut buffers: [_; 3] = std::array::from_fn(|_| Vec::new());
let query_size = records.len();

let sz = <OprfReport<BreakdownKey, TriggerValue, Timestamp> as Serializable>::Size::USIZE;
for buffer in &mut buffers {
buffer.resize(query_size * sz, 0u8);
}
if !query_config.plaintext_match_keys {
if let Some((key_id, key_registries)) = encryption {
const ESTIMATED_AVERAGE_REPORT_SIZE: usize = 80; // TODO: confirm/adjust
for buffer in &mut buffers {
buffer.reserve(query_size * ESTIMATED_AVERAGE_REPORT_SIZE);
}

let shares: [Vec<OprfReport<BreakdownKey, TriggerValue, Timestamp>>; 3] =
records.iter().cloned().share();
zip(&mut buffers, shares).for_each(|(buf, shares)| {
for (share, chunk) in zip(shares, buf.chunks_mut(sz)) {
share.serialize(GenericArray::from_mut_slice(chunk));
let mut rng = StdRng::from_entropy();
let shares: [Vec<OprfReport<BreakdownKey, TriggerValue, Timestamp>>; 3] =
records.iter().cloned().share();
zip(&mut buffers, shares).zip(key_registries).for_each(
|((buf, shares), key_registry)| {
for share in shares {
share
.delimited_encrypt_to(key_id, key_registry, &mut rng, buf)
.unwrap();
}
},
);
} else {
panic!("match key encryption was requested, but one or more helpers is missing a public key")
}
} else {
let sz =
<OPRFIPAInputRow<BreakdownKey, TriggerValue, Timestamp> as Serializable>::Size::USIZE;
for buffer in &mut buffers {
buffer.resize(query_size * sz, 0u8);
}
});

let shares: [Vec<OPRFIPAInputRow<BreakdownKey, TriggerValue, Timestamp>>; 3] =
records.iter().cloned().share();

zip(&mut buffers, shares).for_each(|(buf, shares)| {
for (share, chunk) in zip(shares, buf.chunks_mut(sz)) {
share.serialize(GenericArray::from_mut_slice(chunk));
}
});
}

let inputs = buffers.map(BodyStream::from);
tracing::info!("Starting query for OPRF");
5 changes: 4 additions & 1 deletion ipa-core/src/hpke/mod.rs
@@ -29,9 +29,12 @@ type IpaKem = hpke::kem::X25519HkdfSha256;
type IpaAead = hpke::aead::AesGcm128;
type IpaKdf = hpke::kdf::HkdfSha256;

pub type EncapsulationSize = <<IpaKem as hpke::Kem>::EncappedKey as Serializable>::OutputSize;
pub type TagSize = <AeadTag<IpaAead> as Serializable>::OutputSize;

pub type IpaPublicKey = <IpaKem as hpke::kem::Kem>::PublicKey;
pub type IpaPrivateKey = <IpaKem as hpke::kem::Kem>::PrivateKey;
pub type IpaEncappedKey = <IpaKem as hpke::kem::Kem>::EncappedKey;
pub type IpaEncapsulatedKey = <IpaKem as hpke::kem::Kem>::EncappedKey;

pub use hpke::{Deserializable, Serializable};

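The two new aliases expose byte widths at the type level, presumably so the encrypted-report code can compute ciphertext lengths from the ciphersuite rather than hard-coding them. A quick sanity check of what they resolve to, assuming the standard sizes for the suite aliased above (a 32-byte X25519 encapsulated key and a 16-byte AES-GCM-128 tag):

    use typenum::Unsigned;

    // Both aliases are typenum sizes, so they can be read out as constants.
    assert_eq!(EncapsulationSize::USIZE, 32);
    assert_eq!(TagSize::USIZE, 16);
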
6 changes: 3 additions & 3 deletions ipa-core/src/hpke/registry.rs
@@ -128,7 +128,7 @@ mod tests {
use rand_core::{CryptoRng, RngCore, SeedableRng};

use super::*;
use crate::hpke::{IpaAead, IpaEncappedKey, IpaKdf, IpaKem};
use crate::hpke::{IpaAead, IpaEncapsulatedKey, IpaKdf, IpaKem};

const INFO_STR: &[u8] = b"This is an INFO string.";
const AAD: &[u8] = b"This is AAD.";
@@ -137,7 +137,7 @@
pk: &IpaPublicKey,
pt: &[u8],
r: &mut R,
) -> (IpaEncappedKey, Vec<u8>) {
) -> (IpaEncapsulatedKey, Vec<u8>) {
let (encapsulated_key, mut encryption_context) =
hpke::setup_sender::<IpaAead, IpaKdf, IpaKem, _>(&OpModeS::Base, pk, INFO_STR, r)
.expect("Can setup the sender.");
@@ -152,7 +152,7 @@

fn decrypt<I: AsRef<[u8]>>(
sk: &IpaPrivateKey,
payload: &(IpaEncappedKey, I),
payload: &(IpaEncapsulatedKey, I),
) -> Result<Vec<u8>, HpkeError> {
let (encap_key, ct) = payload;
let mut decryption_context = hpke::setup_receiver::<IpaAead, IpaKdf, IpaKem>(
108 changes: 103 additions & 5 deletions ipa-core/src/protocol/ipa_prf/mod.rs
@@ -1,10 +1,12 @@
use std::num::NonZeroU32;
use std::{num::NonZeroU32, ops::Add};

use generic_array::{ArrayLength, GenericArray};
use ipa_macros::Step;
use typenum::{Unsigned, U18};

use self::{quicksort::quicksort_ranges_by_key_insecure, shuffle::shuffle_inputs};
use crate::{
error::Error,
error::{Error, UnwrapInfallible},
ff::{boolean::Boolean, boolean_array::BA64, CustomArray, Field, PrimeField, Serializable},
protocol::{
context::{UpgradableContext, UpgradedContext},
@@ -17,7 +19,6 @@ use crate::{
},
RecordId,
},
report::OprfReport,
secret_sharing::{
replicated::{malicious::ExtendableField, semi_honest::AdditiveShare as Replicated},
SharedValue,
@@ -40,6 +41,103 @@ pub(crate) enum Step {
SortByTimestamp,
}

#[derive(Debug)]
#[cfg_attr(test, derive(Clone, PartialEq, Eq))]
pub struct OPRFIPAInputRow<BK: SharedValue, TV: SharedValue, TS: SharedValue> {
pub match_key: Replicated<BA64>,
pub is_trigger: Replicated<Boolean>,
pub breakdown_key: Replicated<BK>,
pub trigger_value: Replicated<TV>,
pub timestamp: Replicated<TS>,
}

impl<BK: SharedValue, TV: SharedValue, TS: SharedValue> Serializable for OPRFIPAInputRow<BK, TV, TS>
where
Replicated<BK>: Serializable,
Replicated<TV>: Serializable,
Replicated<TS>: Serializable,
<Replicated<BK> as Serializable>::Size: Add<U18>,
<Replicated<TS> as Serializable>::Size:
Add<<<Replicated<BK> as Serializable>::Size as Add<U18>>::Output>,
<Replicated<TV> as Serializable>::Size: Add<
<<Replicated<TS> as Serializable>::Size as Add<
<<Replicated<BK> as Serializable>::Size as Add<U18>>::Output,
>>::Output,
>,
<<Replicated<TV> as Serializable>::Size as Add<
<<Replicated<TS> as Serializable>::Size as Add<
<<Replicated<BK> as Serializable>::Size as Add<U18>>::Output,
>>::Output,
>>::Output: ArrayLength,
{
type Size = <<Replicated<TV> as Serializable>::Size as Add<
<<Replicated<TS> as Serializable>::Size as Add<
<<Replicated<BK> as Serializable>::Size as Add<U18>>::Output,
>>::Output,
>>::Output;
type DeserializationError = Error;

fn serialize(&self, buf: &mut GenericArray<u8, Self::Size>) {
let mk_sz = <Replicated<BA64> as Serializable>::Size::USIZE;
let ts_sz = <Replicated<TS> as Serializable>::Size::USIZE;
let bk_sz = <Replicated<BK> as Serializable>::Size::USIZE;
let tv_sz = <Replicated<TV> as Serializable>::Size::USIZE;
let it_sz = <Replicated<Boolean> as Serializable>::Size::USIZE;

self.match_key
.serialize(GenericArray::from_mut_slice(&mut buf[..mk_sz]));

self.timestamp
.serialize(GenericArray::from_mut_slice(&mut buf[mk_sz..mk_sz + ts_sz]));

self.breakdown_key.serialize(GenericArray::from_mut_slice(
&mut buf[mk_sz + ts_sz..mk_sz + ts_sz + bk_sz],
));

self.trigger_value.serialize(GenericArray::from_mut_slice(
&mut buf[mk_sz + ts_sz + bk_sz..mk_sz + ts_sz + bk_sz + tv_sz],
));

self.is_trigger.serialize(GenericArray::from_mut_slice(
&mut buf[mk_sz + ts_sz + bk_sz + tv_sz..mk_sz + ts_sz + bk_sz + tv_sz + it_sz],
));
}

fn deserialize(buf: &GenericArray<u8, Self::Size>) -> Result<Self, Self::DeserializationError> {
let mk_sz = <Replicated<BA64> as Serializable>::Size::USIZE;
let ts_sz = <Replicated<TS> as Serializable>::Size::USIZE;
let bk_sz = <Replicated<BK> as Serializable>::Size::USIZE;
let tv_sz = <Replicated<TV> as Serializable>::Size::USIZE;
let it_sz = <Replicated<Boolean> as Serializable>::Size::USIZE;

let match_key = Replicated::<BA64>::deserialize(GenericArray::from_slice(&buf[..mk_sz]))
.unwrap_infallible();
let timestamp =
Replicated::<TS>::deserialize(GenericArray::from_slice(&buf[mk_sz..mk_sz + ts_sz]))
.map_err(|e| Error::ParseError(e.into()))?;
let breakdown_key = Replicated::<BK>::deserialize(GenericArray::from_slice(
&buf[mk_sz + ts_sz..mk_sz + ts_sz + bk_sz],
))
.map_err(|e| Error::ParseError(e.into()))?;
let trigger_value = Replicated::<TV>::deserialize(GenericArray::from_slice(
&buf[mk_sz + ts_sz + bk_sz..mk_sz + ts_sz + bk_sz + tv_sz],
))
.map_err(|e| Error::ParseError(e.into()))?;
let is_trigger = Replicated::<Boolean>::deserialize(GenericArray::from_slice(
&buf[mk_sz + ts_sz + bk_sz + tv_sz..mk_sz + ts_sz + bk_sz + tv_sz + it_sz],
))
.map_err(|e| Error::ParseError(e.into()))?;

Ok(Self {
match_key,
is_trigger,
breakdown_key,
trigger_value,
timestamp,
})
}
}

/// IPA OPRF Protocol
///
/// The output of this function is a vector of secret-shared totals, one per breakdown key
@@ -63,7 +161,7 @@
/// Propagates errors from config issues or while running the protocol
pub async fn oprf_ipa<C, BK, TV, TS, SS, F>(
ctx: C,
input_rows: Vec<OprfReport<BK, TV, TS>>,
input_rows: Vec<OPRFIPAInputRow<BK, TV, TS>>,
attribution_window_seconds: Option<NonZeroU32>,
) -> Result<Vec<Replicated<F>>, Error>
where
@@ -105,7 +203,7 @@ where
#[tracing::instrument(name = "compute_prf_for_inputs", skip_all)]
async fn compute_prf_for_inputs<C, BK, TV, TS, F>(
ctx: C,
input_rows: Vec<OprfReport<BK, TV, TS>>,
input_rows: Vec<OPRFIPAInputRow<BK, TV, TS>>,
) -> Result<Vec<PrfShardedIpaInputRow<BK, TV, TS>>, Error>
where
C: UpgradableContext,
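
A note on the OPRFIPAInputRow Serializable impl above: the U18 in the trait bounds appears to cover the two fields whose width does not depend on the type parameters, assuming a replicated share serializes both of its values and a Boolean serializes to one byte:

    Replicated<BA64> match key:     2 x 8 = 16 bytes
    Replicated<Boolean> is_trigger: 2 x 1 =  2 bytes
    fixed part:                     16 + 2 = 18 bytes

so the total serialized length works out to 18 + |Replicated<TS>| + |Replicated<BK>| + |Replicated<TV>|, which matches both the nested Add<U18> in the associated Size type and the offsets used in serialize and deserialize.
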
18 changes: 11 additions & 7 deletions ipa-core/src/protocol/ipa_prf/shuffle/mod.rs
@@ -7,8 +7,10 @@ use crate::{
boolean_array::{BA112, BA64},
ArrayAccess, CustomArray, Expand, Field,
},
protocol::context::{UpgradableContext, UpgradedContext},
report::OprfReport,
protocol::{
context::{UpgradableContext, UpgradedContext},
ipa_prf::OPRFIPAInputRow,
},
secret_sharing::{
replicated::{semi_honest::AdditiveShare, ReplicatedSecretSharing},
SharedValue,
@@ -20,8 +22,8 @@
#[tracing::instrument(name = "shuffle_inputs", skip_all)]
pub async fn shuffle_inputs<C, BK, TV, TS>(
ctx: C,
input: Vec<OprfReport<BK, TV, TS>>,
) -> Result<Vec<OprfReport<BK, TV, TS>>, Error>
input: Vec<OPRFIPAInputRow<BK, TV, TS>>,
) -> Result<Vec<OPRFIPAInputRow<BK, TV, TS>>, Error>
where
C: UpgradableContext,
C::UpgradedContext<Boolean>: UpgradedContext<Boolean, Share = AdditiveShare<Boolean>>,
@@ -44,7 +46,7 @@ where

// This function converts OprfReport to an AdditiveShare needed for shuffle protocol
pub fn oprfreport_to_shuffle_input<YS, BK, TV, TS>(
input: &OprfReport<BK, TV, TS>,
input: &OPRFIPAInputRow<BK, TV, TS>,
) -> AdditiveShare<YS>
where
YS: CustomArray<Element = <BA112 as CustomArray>::Element> + SharedValue,
@@ -73,7 +75,9 @@
}

// This function converts AdditiveShare obtained from shuffle protocol to OprfReport
pub fn shuffled_to_oprfreport<YS, BK, TV, TS>(input: &AdditiveShare<YS>) -> OprfReport<BK, TV, TS>
pub fn shuffled_to_oprfreport<YS, BK, TV, TS>(
input: &AdditiveShare<YS>,
) -> OPRFIPAInputRow<BK, TV, TS>
where
YS: SharedValue + CustomArray<Element = Boolean>,
BK: SharedValue + CustomArray<Element = Boolean>,
@@ -99,7 +103,7 @@
offset += TV::BITS as usize;
let timestamp = extract_from_shared_array::<YS, TS>(input, offset);

OprfReport {
OPRFIPAInputRow {
match_key,
is_trigger,
breakdown_key,
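
On the shuffle conversions in this file: each OPRFIPAInputRow is packed bit-wise into a single AdditiveShare<YS> so that a whole row can pass through the shuffle as one value. Judging from the offset arithmetic, the packed width must hold all five fields, that is 64 bits of match key, 1 bit of is_trigger, plus BK::BITS, TV::BITS, and TS::BITS, and this presumably has to fit within YS (BA112 in shuffle_inputs).
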
4 changes: 2 additions & 2 deletions ipa-core/src/query/executor.rs
@@ -209,7 +209,7 @@ pub fn execute(
move |prss, gateway, config, input| {
let ctx = SemiHonestContext::new(prss, gateway);
Box::pin(
OprfIpaQuery::<_, Fp32BitPrime>::new(ipa_config)
OprfIpaQuery::<_, Fp32BitPrime>::new(ipa_config, key_registry)
.execute(ctx, config.size, input)
.then(|res| ready(res.map(|out| Box::new(out) as Box<dyn Result>))),
)
@@ -223,7 +223,7 @@
move |prss, gateway, config, input| {
let ctx = SemiHonestContext::new(prss, gateway);
Box::pin(
OprfIpaQuery::<_, crate::ff::Fp31>::new(ipa_config)
OprfIpaQuery::<_, crate::ff::Fp31>::new(ipa_config, key_registry)
.execute(ctx, config.size, input)
.then(|res| ready(res.map(|out| Box::new(out) as Box<dyn Result>))),
)
(Diffs for the remaining changed files were not loaded.)