diff --git a/AGGREGATE.md b/AGGREGATE.md
index 519bc9ae97..2e72e8b491 100644
--- a/AGGREGATE.md
+++ b/AGGREGATE.md
@@ -315,7 +315,9 @@ encoded. The map will have the following structure:
   "operation": "histogram", // Allows for the service to support other operations in the future
   "data": [{
     "bucket": <bucket>,
-    "value": <value>
+    "value": <value>,
+    // k is equal to the value `aggregatable_filtering_id_max_bytes`, defaults to 1 (i.e. 8-bit).
+    "id": <filtering ID>
   }, ...]
 }
 ```
@@ -507,6 +509,47 @@ A similar design was proposed for the
 [Private Aggregation API](https://github.com/patcg-individual-drafts/private-aggregation-api/blob/main/report_verification.md#shared-storage)
 for the purpose of report verification.
 
+### Optional: flexible contribution filtering with filtering IDs
+
+The values of a trigger registration's `aggregatable_values` field can be
+integers or dictionaries with an optional `filtering_id` field.
+
+```jsonc
+{
+  ..., // existing fields
+  "aggregatable_filtering_id_max_bytes": 2, // defaults to 1
+  "aggregatable_values": {
+    "campaignCounts": 32768,
+    "geoValue": {
+      "value": 1664,
+      "filtering_id": "23" // must fit within `aggregatable_filtering_id_max_bytes` bytes
+    }
+  }
+}
+```
+
+These IDs will be included in the encrypted aggregatable report payload
+contributions.
+
+Queries to the aggregation service can provide a list of allowed filtering IDs;
+all contributions with non-allowed IDs will be filtered out.
+
+Filtering IDs are unsigned integers limited to a small number of bytes,
+1 byte (8 bits) by default. We limit the size of the ID space to prevent
+unnecessarily increasing the payload size, and thus storage and processing
+costs.
+
+This size can be increased via the `aggregatable_filtering_id_max_bytes` field.
+To avoid amplifying a counting attack via the resulting difference in payload
+size, the browser will unconditionally send an aggregatable report on every
+trigger registration with a non-default (greater than 1) max bytes. A null
+report will be sent if the trigger registration did not generate an
+attribution report. The source registration time will always be excluded from
+an aggregatable report with a non-default max bytes. This behavior is the same
+as when a trigger context ID is set.
+
+See [flexible_filtering.md](https://github.com/patcg-individual-drafts/private-aggregation-api/blob/main/flexible_filtering.md) for more details.
+
 ## Data processing through a Secure Aggregation Service
 
 The exact design of the service is not specified here. We expect to have more
diff --git a/index.bs b/index.bs
index 9d3d3676fc..ab39d711ca 100644
--- a/index.bs
+++ b/index.bs
@@ -871,11 +871,21 @@ An aggregatable trigger data is a [=struct=] with the following items:

Aggregatable values configuration

+An aggregatable key value is a [=struct=] with the following items:
+
+: value
+:: A non-negative 32-bit integer.
+: filtering ID
+:: A non-negative integer.
+
 An aggregatable values configuration is a [=struct=] with the following items:
: values -:: A [=map=] whose [=map/key|keys=] are [=strings=] and whose [=map/value|values=] are non-negative 32-bit integers. +:: A [=map=] whose [=map/key|keys=] are [=strings=] and whose [=map/value|values=] are [=aggregatable key values=]. : filters :: A [=list=] of [=filter configs=]. : negated filters @@ -972,6 +982,8 @@ An attribution trigger is a [=struct=] with the following items: :: Null or a [=string=]. : fenced :: A [=boolean=]. +: aggregatable filtering ID max bytes +:: A positive integer. : aggregatable debug reporting config :: An [=aggregatable debug reporting config=]. @@ -1036,6 +1048,8 @@ An aggregatable contribution is a [=struct=] with the following items :: A non-negative 128-bit integer. : value :: A non-negative 32-bit integer. +: filtering ID +:: A non-negative integer.
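
The net effect of these struct changes is that every histogram contribution now carries a filtering ID alongside its bucket and value, and the aggregation service keeps only the contributions whose IDs are on the allow-list supplied with a query (per the AGGREGATE.md section above). A minimal TypeScript sketch of that behavior — the contribution shape mirrors this diff, but `filterContributions` and the allow-list query shape are illustrative assumptions, not the service's actual API:

```ts
// Sketch only: `AggregatableContribution` mirrors the struct in this diff;
// `filterContributions` and the query interface are hypothetical.

type AggregatableContribution = {
  key: bigint // 128-bit bucket
  value: number // non-negative 32-bit value
  filteringId: bigint // must fit in `aggregatable_filtering_id_max_bytes` bytes
}

// The service keeps only contributions whose filtering ID appears in the
// allow-list supplied with the query; everything else is dropped.
function filterContributions(
  contributions: AggregatableContribution[],
  allowedFilteringIds: Set<bigint>
): AggregatableContribution[] {
  return contributions.filter((c) => allowedFilteringIds.has(c.filteringId))
}

// Example: only the contribution tagged with ID 23 survives.
const kept = filterContributions(
  [
    { key: 1369n, value: 32768, filteringId: 0n },
    { key: 2693n, value: 1664, filteringId: 23n },
  ],
  new Set([23n])
)
console.log(kept) // [{ key: 2693n, value: 1664, filteringId: 23n }]
```

Because the IDs travel only inside the encrypted payload, the filtering decision is visible to no one but the aggregation service.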
@@ -1064,6 +1078,8 @@ An aggregatable attribution report is an [=aggregatable report=] with :: A [=boolean=]. : trigger context ID :: Null or a [=string=]. +: filtering ID max bytes +:: A positive integer. : attribution debug info :: An [=attribution debug info=]. : source identifier @@ -1373,6 +1389,17 @@ controls the maximum [=map/size=] of a [=trigger spec map=] for an the maximum [=string/length=] of an [=attribution trigger=]'s [=attribution trigger/trigger context ID=]. Its value is 64. +Default filtering ID value is a non-negative integer. Its value is 0. +It is the default value for flexible contribution filtering of [=aggregatable reports=]. + +Default filtering ID max bytes is a positive integer that controls the +max bytes used if none is explicitly chosen. Its value is 1. The max bytes value +limits the size of filtering IDs within an [=aggregatable attribution report=]. + +Valid filtering ID max bytes range is a [=set=] of positive integers +that controls the allowable values of max bytes. Its value is [=the inclusive +range|the range=] 1 to 8, inclusive. + Max contributions per aggregatable debug report is a positive integer that controls the maximum [=list/size=] of an [=aggregatable debug report=]'s [=aggregatable debug report/contributions=]. Its value is 2. @@ -1695,6 +1722,8 @@ positive integer |maxValue|, and a [=set=] of [=debug data types=] :: |dataKeyPiece| : [=aggregatable contribution/value=] :: |data|["[=aggregatable-debug-reporting JSON key/value=]"] + : [=aggregatable contribution/filtering ID=] + :: [=default filtering ID value=] 1. If |data|["[=aggregatable-debug-reporting JSON key/types=]"] does not [=map/exist=], return null. 1. Let |dataTypes| be |data|["[=aggregatable-debug-reporting JSON key/types=]"]. @@ -3087,6 +3116,8 @@ and a [=boolean=] |isNoised|: |config|'s [=aggregatable debug reporting config/key piece=] : [=aggregatable contribution/value=] :: |debugDataMap|[|dataTypeToReport|]'s [=aggregatable contribution/value=] + : [=aggregatable contribution/filtering ID=] + :: [=default filtering ID value=] 1. [=list/Append=] |contribution| to |contributions|. 1. Run [=obtain and deliver an aggregatable debug report on registration=] with |contributions|, |source|'s [=attribution source/source site=], |source|'s [=attribution source/reporting origin=], @@ -3250,6 +3281,7 @@ A trigger-registration JSON key is one of the following:
  • "aggregatable_debug_reporting"
  • "aggregatable_deduplication_keys" +
  • "aggregatable_filtering_id_max_bytes"
  • "aggregatable_source_registration_time"
  • "aggregatable_trigger_data"
  • "aggregatable_values" @@ -3258,6 +3290,7 @@ A trigger-registration JSON key is one of the following:
  • "debug_reporting"
  • "deduplication_key"
  • "event_trigger_data" +
  • "filtering_id"
  • "filters"
  • "key_piece"
  • "not_filters" @@ -3358,24 +3391,62 @@ To parse aggregatable trigger data given a [=map=] |map|: 1. [=list/Append=] |aggregatableTrigger| to |aggregatableTriggerData|. 1. Return |aggregatableTriggerData|. -To parse aggregatable key-values given a [=map=] |map|: +To parse aggregatable filtering ID max bytes given a [=map=] |map|: + +1. Let |maxBytes| be [=default filtering ID max bytes=]. +1. If |map|["[=trigger-registration JSON key/aggregatable_filtering_id_max_bytes=]"] [=map/exists=]: + 1. Set |maxBytes| to |map|["[=trigger-registration JSON key/aggregatable_filtering_id_max_bytes=]"]. + 1. If |maxBytes| is a positive integer and is [=set/contained=] in the [=valid filtering ID max bytes range=], + return |maxBytes|. + 1. Otherwise, return null. +1. Return |maxBytes|. + +To validate aggregatable key-values value given a |value|: +1. If |value| is not an integer, return false. +1. If |value| is less than or equal to 0, return false. +1. If |value| is greater than [=allowed aggregatable budget per source=], return false. +1. Return true. + +To parse aggregatable key-values given a [=map=] |map| and a positive integer |maxBytes|: +1. Let |out| be a new [=map=]. 1. [=map/iterate|For each=] |key| → |value| of |map|: 1. If |key|'s [=string/length=] is greater than the [=max length per aggregation key identifier=], return null. - 1. If |value| is not an integer, return null. - 1. If |value| is less than or equal to 0, return null. - 1. If |value| is greater than [=allowed aggregatable budget per source=], return null. -1. Return |map|. - -To parse aggregatable values given a [=map=] |map|: + 1. If |value| is not a [=map=] or an integer, return null. + 1. If |value| is an integer: + 1. If the result of running [=validate aggregatable key-values value=] with |value| is false, return null. + 1. [=map/Set=] |out|[|key|] to a new [=aggregatable key value=] whose items are + : [=aggregatable key value/value=] + :: |value| + : [=aggregatable key value/filtering ID=] + :: [=default filtering ID value=] + 1. [=iteration/Continue=]. + 1. If |value|["[=trigger-registration JSON key/value=]"] does not [=map/exist=], return null. + 1. If the result of running [=validate aggregatable key-values value=] with + |value|["[=trigger-registration JSON key/value=]"] is false, return null. + 1. Let |filteringId| be [=default filtering ID value=]. + 1. If |value|["[=trigger-registration JSON key/filtering_id=]"] [=map/exists=]: + 1. Set |filteringId| to the result of applying the + rules for parsing non-negative integers to |value|["[=trigger-registration JSON key/filtering_id=]"]. + 1. If |filteringId| is an error, return null. + 1. If |filteringId| is not in [=the exclusive range|the range=] + 0 to 256|maxBytes|, exclusive, return null. + 1. [=map/Set=] |out|[|key|] to a new [=aggregatable key value=] whose items are + : [=aggregatable key value/value=] + :: |value|["[=trigger-registration JSON key/value=]"] + : [=aggregatable key value/filtering ID=] + :: |filteringId| +1. Return |out|. + +To parse aggregatable values given a [=map=] |map| and a positive integer |maxBytes|: 1. If |map|["[=trigger-registration JSON key/aggregatable_values=]"] does not [=map/exist=], return a new [=list=]. 1. Let |values| be |map|["[=trigger-registration JSON key/aggregatable_values=]"]. 1. If |values| is not a [=map=] or a [=list=], return null. 1. Let |aggregatableValuesConfigurations| be a [=list=] of [=aggregatable values configurations=], initially empty. 1. If |values| is a [=map=]: - 1. 
Let |aggregatableKeyValues| be the result of running [=parse aggregatable key-values=] with |values|. + 1. Let |aggregatableKeyValues| be the result of running [=parse aggregatable key-values=] with |values| and |maxBytes|. 1. If |aggregatableKeyValues| is null, return null. 1. Let |aggregatableValuesConfiguration| be a new [=aggregatable values configuration=] with the items: : [=aggregatable values configuration/values=] @@ -3389,7 +3460,8 @@ To parse aggregatable values given a [=map=] |map|: 1. [=list/iterate|For each=] |value| of |values|: 1. If |value| is not a [=map=], return null. 1. If |value|["[=trigger-registration JSON key/values=]"] does not [=map/exist=], return null. - 1. Let |aggregatableKeyValues| be the result of running [=parse aggregatable key-values=] with |value|["[=trigger-registration JSON key/values=]"]. + 1. Let |aggregatableKeyValues| be the result of running [=parse aggregatable key-values=] with + |value|["[=trigger-registration JSON key/values=]"] and |maxBytes|. 1. If |aggregatableKeyValues| is null, return null. 1. Let |filterPair| be the result of running [=parse a filter pair=] with |value|. @@ -3442,7 +3514,10 @@ a [=moment=] |triggerTime|, and a [=boolean=] |fenced|: 1. Let |aggregatableTriggerData| be the result of running [=parse aggregatable trigger data=] with |value|. 1. If |aggregatableTriggerData| is null, return null. -1. Let |aggregatableValuesConfigurations| be the result of running [=parse aggregatable values=] with |value|. +1. Let |filteringIdsMaxBytes| be the result of [=parsing aggregatable filtering ID max bytes=] with |value|. +1. If |filteringIdsMaxBytes| is null, return null. +1. Let |aggregatableValuesConfigurations| be the result of running + [=parse aggregatable values=] with |value| and |filteringIdsMaxBytes|. 1. If |aggregatableValuesConfigurations| is null, return null. 1. Let |aggregatableDedupKeys| be the result of running [=parse aggregatable dedup keys=] with |value|. @@ -3474,7 +3549,6 @@ a [=moment=] |triggerTime|, and a [=boolean=] |fenced|: 1. If |value|["[=trigger-registration JSON key/trigger_context_id=]"] is not a [=string=], return null. 1. If |value|["[=trigger-registration JSON key/trigger_context_id=]"]'s [=string/length=] is greater than the [=max length per trigger context ID=], return null. - 1. If |aggregatableSourceRegTimeConfig| is not "[=aggregatable source registration time configuration/exclude=]", return null. 1. Set |triggerContextID| to |value|["[=trigger-registration JSON key/trigger_context_id=]"]. 1. Let |aggregatableDebugReportingConfig| be a new [=aggregatable debug reporting config=]. 1. If |value|["[=trigger-registration JSON key/aggregatable_debug_reporting=]"] [=map/exists=]: @@ -3515,8 +3589,12 @@ a [=moment=] |triggerTime|, and a [=boolean=] |fenced|: :: |triggerContextID| : [=attribution trigger/fenced=] :: |fenced| + : [=attribution trigger/aggregatable filtering ID max bytes=] + :: |filteringIdsMaxBytes| : [=attribution trigger/aggregatable debug reporting config=] :: |aggregatableDebugReportingConfig| +1. If |aggregatableSourceRegTimeConfig| is not "[=aggregatable source registration time configuration/exclude=]" + and the result of running [=check if an aggregatable attribution report should be unconditionally sent=] with |trigger| is true, return null. 1. Return |trigger|. Issue: Determine proper charset-handling for the JSON header value. 
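
The range check in the parsing steps above is the one place the max bytes setting does real work: an ID is valid only if it fits in |maxBytes| bytes, i.e. is less than 256^|maxBytes|. A small TypeScript sketch (hypothetical helper name) with the worked bounds for the default and a 2-byte maximum:

```ts
// Hypothetical helper mirroring the spec's bound: a filtering ID is valid
// iff 0 <= id < 256^maxBytes, i.e. it fits in `maxBytes` bytes.
function isValidFilteringId(filteringId: bigint, maxBytes: number): boolean {
  const limit = 256n ** BigInt(maxBytes) // exact bigint arithmetic up to maxBytes = 8
  return filteringId >= 0n && filteringId < limit
}

console.log(isValidFilteringId(255n, 1)) // true: fits in the default 1 byte
console.log(isValidFilteringId(256n, 1)) // false: would need 2 bytes
console.log(isValidFilteringId(65535n, 2)) // true: 256^2 - 1
console.log(isValidFilteringId(65536n, 2)) // false
```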
@@ -3587,6 +3665,15 @@ To match an attribution source against filters and negated filters gi |source|, |notFilters|, |moment|, and [=match an attribution source against filters/isNegated=] set to true is false, return false. 1. Return true. +

    Should send a report unconditionally

+
+To check if an aggregatable attribution report should be unconditionally sent given an [=attribution trigger=] |trigger|:
+
+1. If |trigger|'s [=attribution trigger/trigger context ID=] is not null, return true.
+1. If |trigger|'s [=attribution trigger/aggregatable filtering ID max bytes=]
+   is not equal to [=default filtering ID max bytes=], return true.
+1. Return false.
+
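
A TypeScript sketch of this predicate, with illustrative type and constant names rather than spec-defined ones:

```ts
// Illustrative types and constant; the spec's structs are not TypeScript.
type AttributionTrigger = {
  triggerContextId: string | null
  aggregatableFilteringIdMaxBytes: number
}

const defaultFilteringIdMaxBytes = 1

// Mirrors the three steps above: a trigger context ID or a non-default
// max bytes forces a report (possibly a null report) to be sent.
function shouldSendReportUnconditionally(t: AttributionTrigger): boolean {
  return (
    t.triggerContextId !== null ||
    t.aggregatableFilteringIdMaxBytes !== defaultFilteringIdMaxBytes
  )
}

// A non-default max bytes alone is enough to trigger unconditional sending.
console.log(
  shouldSendReportUnconditionally({
    triggerContextId: null,
    aggregatableFilteringIdMaxBytes: 2,
  })
) // true
```

When the predicate is true, the delivery-time step later in this diff returns the trigger time with no random delay, and the null-report step sets the randomized null-report rate to 1, so an observer cannot learn anything from a report's absence or timing.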

    Should attribution be blocked by rate limits

    To check if attribution should be blocked by attribution rate limit given an [=attribution trigger=] |trigger|, an [=attribution source=] |sourceToAttribute|, and a [=attribution rate-limit record/scope=] |rateLimitScope|: @@ -3632,7 +3719,9 @@ To create [=aggregatable contributions=] from [=attribution source/aggregat : [=aggregatable contribution/key=] :: |key| : [=aggregatable contribution/value=] - :: |aggregatableValues|[|id|] + :: |aggregatableValues|[|id|]'s [=aggregatable key value/value=] + : [=aggregatable contribution/filtering ID=] + :: |aggregatableValues|[|id|]'s [=aggregatable key value/filtering ID=] 1. [=list/Append=] |contribution| to |contributions|. 1. Return |contributions|. @@ -4021,6 +4110,8 @@ and an optional [=attribution source=] |sourceToAttribute|: :: |keyPiece| : [=aggregatable contribution/value=] :: |debugDataMap|[|type|]'s [=aggregatable contribution/value=] + : [=aggregatable contribution/filtering ID=] + :: [=default filtering ID value=] 1. [=list/Append=] |contribution| to |contributions|. 1. Run [=obtain and deliver an aggregatable debug report on registration=] with |contributions|, |trigger|'s [=attribution trigger/attribution destination=], |trigger|'s [=attribution trigger/reporting origin=], @@ -4133,7 +4224,8 @@ To obtain an aggregatable attribution report delivery time given an [ |trigger|, perform the following steps. They return a [=moment=]. 1. Let |triggerTime| be |trigger|'s [=attribution trigger/trigger time=]. -1. If |trigger|'s [=attribution trigger/trigger context ID=] is not null, return |triggerTime|. +1. If the result of running [=check if an aggregatable attribution report should be unconditionally sent=] with |trigger| + is true, return |triggerTime|. 1. Let |r| be a random double between 0 (inclusive) and 1 (exclusive) with uniform probability. 1. Return |triggerTime| + |r| * [=randomized aggregatable attribution report delay=]. @@ -4211,6 +4303,8 @@ an [=attribution trigger=] |trigger|: :: |trigger|'s [=attribution trigger/aggregatable source registration time configuration=]. : [=aggregatable attribution report/trigger context ID=] :: |trigger|'s [=attribution trigger/trigger context ID=] + : [=aggregatable attribution report/filtering ID max bytes=] + :: |trigger|'s [=attribution trigger/aggregatable filtering ID max bytes=] : [=aggregatable attribution report/source identifier=] :: |source|'s [=attribution source/source identifier=]. 1. Return |report|. @@ -4246,6 +4340,8 @@ To obtain a null attribution report given an [=attribution trigger=] :: true : [=aggregatable attribution report/trigger context ID=] :: |trigger|'s [=attribution trigger/trigger context ID=] + : [=aggregatable attribution report/filtering ID max bytes=] + :: |trigger|'s [=attribution trigger/aggregatable filtering ID max bytes=] 1. Return |report|. To obtain rounded source time given a [=moment=] |sourceTime|, return |sourceTime| in seconds @@ -4263,8 +4359,8 @@ To generate null attribution reports given an [=attribution trigger=] 1. Let |nullReports| be a new [=list=]. 1. If |trigger|'s [=attribution trigger/aggregatable source registration time configuration=] is "[=aggregatable source registration time configuration/exclude=]": 1. Let |randomizedNullReportRate| be [=randomized null attribution report rate excluding source registration time=]. - 1. If |trigger|'s [=attribution trigger/trigger context ID=] is not null, set - |randomizedNullReportRate| to 1. + 1. 
If the result of running [=check if an aggregatable attribution report should be unconditionally sent=] with |trigger| + is true, set |randomizedNullReportRate| to 1. 1. If |report| is null and the result of [=determining if a randomized null attribution report is generated=] with |randomizedNullReportRate| is true: 1. Let |nullReport| be the result of [=obtaining a null attribution report=] with |trigger| and |trigger|'s @@ -4357,8 +4453,10 @@ To queue reports for delivery given a [=set=] of

    Encode an unsigned k-bit integer

-To encode an unsigned k-bit integer, represent it as a big-endian [=byte sequence=]
-of length k / 8, left padding with zero as necessary.
+To encode an unsigned k-byte integer given an integer |integerToEncode|
+and an integer |byteLength|, return the representation of |integerToEncode| as a
+big-endian [=byte sequence=] of length |byteLength|, left padding with zeroes as
+necessary.
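
A TypeScript sketch of the revised algorithm (the implementation is an assumption; only the big-endian, left-zero-padded behavior is specified). The payload steps later in this diff invoke it with 16 bytes for the bucket, 4 bytes for the value, and the report's filtering ID max bytes for the ID:

```ts
// Assumed implementation; the spec only fixes the observable behavior
// (big-endian, left-padded with zeroes to `byteLength` bytes).
function encodeUnsignedKByteInteger(
  integerToEncode: bigint,
  byteLength: number
): Uint8Array {
  const out = new Uint8Array(byteLength) // zero-initialized, so padding is free
  let remaining = integerToEncode
  for (let i = byteLength - 1; i >= 0; i--) {
    out[i] = Number(remaining & 0xffn) // least-significant byte goes last
    remaining >>= 8n
  }
  return out
}

console.log(encodeUnsignedKByteInteger(23n, 1)) // Uint8Array [ 23 ]
console.log(encodeUnsignedKByteInteger(23n, 2)) // Uint8Array [ 0, 23 ]
```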

    Obtaining an aggregatable report's debug mode

    @@ -4411,7 +4509,7 @@ of running the following steps: : "`scheduled_report_time`" :: |report|'s [=aggregatable report/report time=] in seconds since the UNIX epoch, [=serialize an integer|serialized=] : "`version`" - :: "`0.1`" + :: "`1.0`" Note: The "`version`" value needs to be bumped if the aggregation service upgrades. @@ -4471,15 +4569,23 @@ is the result of running the following steps: :: 0 : [=aggregatable contribution/value=] :: 0 + : [=aggregatable contribution/filtering ID=] + :: [=default filtering ID value=] 1. [=list/Append=] |nullContribution| to |contributions|. 1. [=list/iterate|For each=] |contribution| of |contributions|: 1. Let |contributionData| be a [=map=] of the following key/value pairs: : "`bucket`" - :: |contribution|'s [=aggregatable contribution/key=], [=encode an unsigned k-bit integer|encoded=] + :: The result of [=encoding an unsigned k-byte integer=] given + |contribution|'s [=aggregatable contribution/key=] and 16. : "`value`" - :: |contribution|'s [=aggregatable contribution/value=], [=encode an unsigned k-bit integer|encoded=] + :: The result of [=encoding an unsigned k-byte integer=] given + |contribution|'s [=aggregatable contribution/value=] and 4. + : "`id`" + :: The result of [=encoding an unsigned k-byte integer=] given + |contribution|'s [=aggregatable contribution/filtering ID=] and + |report|'s [=aggregatable attribution report/filtering ID max bytes=]. 1. [=list/Append=] |contributionData| to |payloadData|. 1. Let |payload| be a [=map=] of the following key/value pairs: diff --git a/ts/src/constants.ts b/ts/src/constants.ts index 2d30fd7c3d..bdc09f0d35 100644 --- a/ts/src/constants.ts +++ b/ts/src/constants.ts @@ -51,6 +51,12 @@ export const defaultTriggerDataCardinality: Readonly< [SourceType.navigation]: 8n, } +export const defaultAggregatableFilteringIdMaxBytes: number = 1 + +export const maxAggregatableFilteringIdMaxBytesValue: number = 8 + +export const defaultFilteringIdValue: bigint = 0n + export const sourceAggregatableDebugTypes: Readonly<[string, ...string[]]> = [ 'source-channel-capacity-limit', 'source-destination-global-rate-limit', diff --git a/ts/src/header-validator/aggregatable-contributions.test.ts b/ts/src/header-validator/aggregatable-contributions.test.ts index a68df2b7e2..019e6e6d6d 100644 --- a/ts/src/header-validator/aggregatable-contributions.test.ts +++ b/ts/src/header-validator/aggregatable-contributions.test.ts @@ -87,8 +87,8 @@ void test('basic', () => { const aggregatableValuesCfgs: AggregatableValuesConfiguration[] = [ { values: new Map([ - ['key1', 32768], - ['key2', 1664], + ['key1', { value: 32768, filteringId: 25n }], + ['key2', { value: 1664, filteringId: 0n }], ]), positive: [], negative: [], @@ -109,10 +109,12 @@ void test('basic', () => { { key: 1369n, value: 32768, + filteringId: 25n, }, { key: 2693n, value: 1664, + filteringId: 0n, }, ]) }) @@ -151,7 +153,7 @@ void test('values-filtered', async (t) => { assert.deepEqual( createWith([ { - values: new Map([['key1', 32768]]), + values: new Map([['key1', { value: 32768, filteringId: 0n }]]), positive: [ { map: new Map([['product', new Set(['2'])]]), @@ -169,7 +171,7 @@ void test('values-filtered', async (t) => { assert.deepEqual( createWith([ { - values: new Map([['key1', 32768]]), + values: new Map([['key1', { value: 32768, filteringId: 0n }]]), positive: [ { map: new Map([['product', new Set(['2'])]]), @@ -179,7 +181,7 @@ void test('values-filtered', async (t) => { negative: [], }, { - values: new Map([['key2', 1664]]), + values: new Map([['key2', { 
value: 1664, filteringId: 0n }]]), positive: [ { map: new Map([['product', new Set(['1'])]]), @@ -189,7 +191,7 @@ void test('values-filtered', async (t) => { negative: [], }, ]), - [{ key: 1029n, value: 1664 }] + [{ key: 1029n, value: 1664, filteringId: 0n }] ) ) @@ -197,7 +199,7 @@ void test('values-filtered', async (t) => { assert.deepEqual( createWith([ { - values: new Map([['key1', 32768]]), + values: new Map([['key1', { value: 32768, filteringId: 0n }]]), positive: [ { map: new Map([['product', new Set(['1'])]]), @@ -207,7 +209,7 @@ void test('values-filtered', async (t) => { negative: [], }, { - values: new Map([['key2', 1664]]), + values: new Map([['key2', { value: 1664, filteringId: 0n }]]), positive: [ { map: new Map([['product', new Set(['1'])]]), @@ -217,7 +219,7 @@ void test('values-filtered', async (t) => { negative: [], }, ]), - [{ key: 1369n, value: 32768 }] + [{ key: 1369n, value: 32768, filteringId: 0n }] ) ) @@ -225,7 +227,7 @@ void test('values-filtered', async (t) => { assert.deepEqual( createWith([ { - values: new Map([['key3', 32768]]), + values: new Map([['key3', { value: 32768, filteringId: 0n }]]), positive: [ { map: new Map([['product', new Set(['1'])]]), @@ -237,7 +239,7 @@ void test('values-filtered', async (t) => { // Shouldn't contribute as only the first aggregatable values // entry with matching filters is considered { - values: new Map([['key2', 1664]]), + values: new Map([['key2', { value: 1664, filteringId: 0n }]]), positive: [ { map: new Map([['product', new Set(['1'])]]), @@ -255,7 +257,7 @@ void test('values-filtered', async (t) => { assert.deepEqual( createWith([ { - values: new Map([['key1', 32768]]), + values: new Map([['key1', { value: 32768, filteringId: 0n }]]), positive: [], negative: [ { @@ -265,7 +267,7 @@ void test('values-filtered', async (t) => { ], }, { - values: new Map([['key2', 1664]]), + values: new Map([['key2', { value: 1664, filteringId: 0n }]]), positive: [ { map: new Map([['product', new Set(['1'])]]), @@ -275,7 +277,7 @@ void test('values-filtered', async (t) => { negative: [], }, ]), - [{ key: 1029n, value: 1664 }] + [{ key: 1029n, value: 1664, filteringId: 0n }] ) ) }) diff --git a/ts/src/header-validator/aggregatable-contributions.ts b/ts/src/header-validator/aggregatable-contributions.ts index 748332a604..1c66840947 100644 --- a/ts/src/header-validator/aggregatable-contributions.ts +++ b/ts/src/header-validator/aggregatable-contributions.ts @@ -11,6 +11,7 @@ import { export type AggregatableContribution = { key: bigint value: number + filteringId: bigint } // https://wicg.github.io/attribution-reporting-api/#create-aggregatable-contributions-from-aggregation-keys-and-aggregatable-values @@ -24,7 +25,7 @@ function createAggregatableContributionsFromKeysAndValues( if (value === undefined) { continue } - contributions.push({ key, value }) + contributions.push({ key, ...value }) } return contributions } diff --git a/ts/src/header-validator/to-json.ts b/ts/src/header-validator/to-json.ts index ceb6b237aa..b7643a543b 100644 --- a/ts/src/header-validator/to-json.ts +++ b/ts/src/header-validator/to-json.ts @@ -309,17 +309,30 @@ function serializeAggregatableTriggerDatum( } } +export type AggregatableValues = { + [key: string]: { + value: number + filtering_id: string + } +} + export type AggregatableValuesConfiguration = FilterPair & { - values: { [key: string]: number } + values: AggregatableValues } function serializeAggregatableValuesConfiguration( c: parsed.AggregatableValuesConfiguration ): AggregatableValuesConfiguration { + 
const values: AggregatableValues = {} + for (const [key, value] of c.values.entries()) { + values[key] = { + value: value.value, + filtering_id: value.filteringId.toString(), + } + } return { ...serializeFilterPair(c), - - values: Object.fromEntries(c.values.entries()), + values, } } @@ -328,6 +341,7 @@ export type Trigger = CommonDebug & aggregatable_deduplication_keys: AggregatableDedupKey[] aggregatable_source_registration_time: string aggregatable_trigger_data: AggregatableTriggerDatum[] + aggregatable_filtering_id_max_bytes: number aggregatable_values: AggregatableValuesConfiguration[] aggregation_coordinator_origin: string event_trigger_data: EventTriggerDatum[] @@ -355,6 +369,8 @@ export function serializeTrigger( serializeAggregatableTriggerDatum ), + aggregatable_filtering_id_max_bytes: t.aggregatableFilteringIdMaxBytes, + aggregatable_values: Array.from( t.aggregatableValuesConfigurations, serializeAggregatableValuesConfiguration diff --git a/ts/src/header-validator/trigger.test.ts b/ts/src/header-validator/trigger.test.ts index b242c88807..ee727ae740 100644 --- a/ts/src/header-validator/trigger.test.ts +++ b/ts/src/header-validator/trigger.test.ts @@ -28,9 +28,10 @@ const testCases: jsontest.TestCase[] = [ "filters": {"a": ["b"]}, "key_piece": "0x1", "not_filters": {"c": ["d"]}, - "source_keys": ["x"] + "source_keys": ["x", "y"] }], - "aggregatable_values": {"x": 5}, + "aggregatable_filtering_id_max_bytes": 1, + "aggregatable_values": {"x": 5, "y": {"value": 10, "filtering_id": "25" }}, "debug_key": "5", "debug_reporting": true, "event_trigger_data": [{ @@ -101,12 +102,16 @@ const testCases: jsontest.TestCase[] = [ map: new Map([['c', new Set(['d'])]]), }, ], - sourceKeys: new Set(['x']), + sourceKeys: new Set(['x', 'y']), }, ], + aggregatableFilteringIdMaxBytes: 1, aggregatableValuesConfigurations: [ { - values: new Map([['x', 5]]), + values: new Map([ + ['x', { value: 5, filteringId: 0n }], + ['y', { value: 10, filteringId: 25n }], + ]), positive: [], negative: [], }, @@ -376,7 +381,7 @@ const testCases: jsontest.TestCase[] = [ expectedErrors: [ { path: ['aggregatable_values', 'a'], - msg: 'must be a number', + msg: 'must be a number or an object', }, ], }, @@ -1097,7 +1102,102 @@ const testCases: jsontest.TestCase[] = [ }, ], }, - + { + name: 'aggregatable_filtering_id_max_bytes-too-big', + json: `{ + "aggregatable_filtering_id_max_bytes": 9 + }`, + expectedErrors: [ + { + path: ['aggregatable_filtering_id_max_bytes'], + msg: 'must be in the range [1, 8]', + }, + ], + }, + { + name: 'aggregatable_filtering_id_max_bytes-invalid-aggregatable-source-registration-time', + json: `{"aggregatable_filtering_id_max_bytes": 2, "aggregatable_source_registration_time": 1}`, + expectedErrors: [ + { + path: ['aggregatable_source_registration_time'], + msg: 'must be a string', + }, + { + path: ['aggregatable_filtering_id_max_bytes'], + msg: 'cannot be fully validated without a valid aggregatable_source_registration_time', + }, + ], + }, + { + name: 'aggregatable_filtering_id_max_bytes-prohibited-aggregatable-source-registration-time-include', + json: `{"aggregatable_filtering_id_max_bytes": 2, "aggregatable_source_registration_time": "include"}`, + expectedErrors: [ + { + path: ['aggregatable_filtering_id_max_bytes'], + msg: 'with a non-default value (higher than 1) is prohibited for aggregatable_source_registration_time include', + }, + ], + }, + { + name: 'aggregatable-values-with-too-big-filtering_id', + json: `{ + "aggregatable_trigger_data": [{ + "key_piece": "0x1", + "source_keys": 
["x", "y"] + }], + "aggregatable_values": {"x": 5, "y": { "value": 10, "filtering_id": "256" }} + }`, + expectedErrors: [ + { + path: ['aggregatable_values', 'y', 'filtering_id'], + msg: 'must be in the range [0, 255]. It exceeds the default max size of 1 byte. To increase, specify the aggregatable_filtering_id_max_bytes property.', + }, + ], + }, + { + name: 'aggregatable-values-with-too-big-filtering_id-non-default-max', + json: `{ + "aggregatable_trigger_data": [{ + "key_piece": "0x1", + "source_keys": ["x", "y"] + }], + "aggregatable_filtering_id_max_bytes": 2, + "aggregatable_values": [ + {"values": {"x": 5 }}, + {"values": {"y": { "value": 10, "filtering_id": "65536" }}} + ] + }`, + expectedErrors: [ + { + path: ['aggregatable_values', 1, 'values', 'y', 'filtering_id'], + msg: 'must be in the range [0, 65535]', + }, + ], + }, + { + name: 'aggregatable-values-with-invalid-filtering_id-non-default-max', + json: `{ + "aggregatable_trigger_data": [{ + "key_piece": "0x1", + "source_keys": ["x", "y"] + }], + "aggregatable_filtering_id_max_bytes": "2", + "aggregatable_values": [ + {"values": {"x": 5 }}, + {"values": {"y": { "value": 10, "filtering_id": "65536" }}} + ] + }`, + expectedErrors: [ + { + msg: 'must be a number', + path: ['aggregatable_filtering_id_max_bytes'], + }, + { + path: ['aggregatable_values', 1, 'values', 'y', 'filtering_id'], + msg: 'cannot be fully validated without a valid aggregatable_filtering_id_max_bytes', + }, + ], + }, { name: 'aggregatable-debug-reporting-wrong-type', json: `{ diff --git a/ts/src/header-validator/validate-json.ts b/ts/src/header-validator/validate-json.ts index 2767fd7185..9cfc5164f8 100644 --- a/ts/src/header-validator/validate-json.ts +++ b/ts/src/header-validator/validate-json.ts @@ -164,7 +164,7 @@ function list(j: Json, ctx: Context): Maybe { return typeSwitch(j, ctx, { list: some }) } -function uint64(j: Json, ctx: Context): Maybe { +function uint(j: Json, ctx: Context): Maybe { return string(j, ctx) .filter( matchesPattern, @@ -173,13 +173,16 @@ function uint64(j: Json, ctx: Context): Maybe { 'string must represent a non-negative integer' ) .map(BigInt) - .filter( - isInRange, - ctx, - 0n, - 2n ** 64n - 1n, - 'must fit in an unsigned 64-bit integer' - ) +} + +function uint64(j: Json, ctx: Context): Maybe { + return uint(j, ctx).filter( + isInRange, + ctx, + 0n, + 2n ** 64n - 1n, + 'must fit in an unsigned 64-bit integer' + ) } function number(j: Json, ctx: Context): Maybe { @@ -536,8 +539,7 @@ function aggregatableDebugReportingData( requireDistinct: true, }) ), - value: field('value', aggregatableValue), - + value: field('value', aggregatableKeyValueValue), ...keyPieceField, }) } @@ -769,8 +771,7 @@ function sourceAggregatableDebugReportingConfig( ctx: RegistrationContext ): Maybe { return struct(j, ctx, { - budget: field('budget', aggregatableValue), - + budget: field('budget', aggregatableKeyValueValue), ...aggregatableDebugReportingConfig, }).filter((s) => { for (const d of s.debugData) { @@ -1166,54 +1167,137 @@ function aggregatableTriggerData( ) } -export type AggregatableValues = Map +export type AggregatableValuesValue = { + value: number + filteringId: bigint +} + +export type AggregatableValues = Map export type AggregatableValuesConfiguration = FilterPair & { values: AggregatableValues } -function aggregatableValue(j: Json, ctx: Context): Maybe { +function aggregatableKeyValueValue(j: Json, ctx: Context): Maybe { return number(j, ctx) .filter(isInteger, ctx) .filter(isInRange, ctx, 1, 
constants.allowedAggregatableBudgetPerSource) } +function aggregatableKeyValueFilteringId( + j: Json, + ctx: Context, + maxBytes: Maybe +): Maybe { + return uint(j, ctx).filter((n) => { + if (maxBytes.value === undefined) { + ctx.error( + `cannot be fully validated without a valid aggregatable_filtering_id_max_bytes` + ) + return false + } + return isInRange( + n, + ctx, + 0n, + 256n ** BigInt(maxBytes.value) - 1n, + maxBytes.value == constants.defaultAggregatableFilteringIdMaxBytes + ? 'must be in the range [0, 255]. It exceeds the default max size of 1 byte. To increase, specify the aggregatable_filtering_id_max_bytes property.' + : undefined + ) + }) +} + function aggregatableKeyValue( [key, j]: [string, Json], - ctx: Context -): Maybe { + ctx: Context, + maxBytes: Maybe +): Maybe { if (!aggregationKeyIdentifierLength(key, ctx, 'key ')) { return None } - return aggregatableValue(j, ctx) + + return typeSwitch(j, ctx, { + number: (j) => + aggregatableKeyValueValue(j, ctx).map((j) => ({ + value: j, + filteringId: constants.defaultFilteringIdValue, + })), + object: (j) => + struct(j, ctx, { + value: field('value', aggregatableKeyValueValue), + filteringId: field('filtering_id', (j) => + aggregatableKeyValueFilteringId(j, ctx, maxBytes) + ), + }), + }) } function aggregatableKeyValues( j: Json, - ctx: Context + ctx: Context, + maxBytes: Maybe ): Maybe { - return keyValues(j, ctx, aggregatableKeyValue) + return keyValues(j, ctx, (j) => aggregatableKeyValue(j, ctx, maxBytes)) } function aggregatableValuesConfigurations( j: Json, - ctx: Context + ctx: Context, + maxBytes: Maybe ): Maybe { return typeSwitch(j, ctx, { object: (j) => - aggregatableKeyValues(j, ctx).map((values) => [ + aggregatableKeyValues(j, ctx, maxBytes).map((values) => [ { values, positive: [], negative: [] }, ]), list: (j) => array(j, ctx, (j) => struct(j, ctx, { - values: field('values', aggregatableKeyValues), + values: field('values', (j) => + aggregatableKeyValues(j, ctx, maxBytes) + ), ...filterFields, }) ), }) } +function aggregatableFilteringIdMaxBytes( + j: Json, + ctx: Context, + aggregatableSourceRegTime: Maybe +): Maybe { + return number(j, ctx) + .filter(isInteger, ctx) + .filter( + isInRange, + ctx, + 1, + constants.maxAggregatableFilteringIdMaxBytesValue + ) + .filter((n) => { + if (aggregatableSourceRegTime.value === undefined) { + ctx.error( + `cannot be fully validated without a valid aggregatable_source_registration_time` + ) + return false + } + if ( + aggregatableSourceRegTime.value !== + AggregatableSourceRegistrationTime.exclude && + n !== constants.defaultAggregatableFilteringIdMaxBytes + ) { + ctx.error( + `with a non-default value (higher than ${constants.defaultAggregatableFilteringIdMaxBytes}) is prohibited for aggregatable_source_registration_time ${aggregatableSourceRegTime.value}` + ) + return false + } + + return true + }) +} + export type EventTriggerDatum = FilterPair & Priority & DedupKey & { @@ -1373,6 +1457,7 @@ export type Trigger = CommonDebug & aggregatableDedupKeys: AggregatableDedupKey[] aggregatableTriggerData: AggregatableTriggerDatum[] aggregatableSourceRegistrationTime: AggregatableSourceRegistrationTime + aggregatableFilteringIdMaxBytes: number aggregatableValuesConfigurations: AggregatableValuesConfiguration[] aggregationCoordinatorOrigin: string eventTriggerData: EventTriggerDatum[] @@ -1389,15 +1474,29 @@ function trigger(j: Json, ctx: RegistrationContext): Maybe { AggregatableSourceRegistrationTime.exclude )(j, ctx) + const aggregatableFilteringIdMaxBytesVal = field( + 
'aggregatable_filtering_id_max_bytes', + (j) => + aggregatableFilteringIdMaxBytes(j, ctx, aggregatableSourceRegTimeVal), + constants.defaultAggregatableFilteringIdMaxBytes + )(j, ctx) + return struct(j, ctx, { aggregatableTriggerData: field( 'aggregatable_trigger_data', aggregatableTriggerData, [] ), + aggregatableFilteringIdMaxBytes: () => + aggregatableFilteringIdMaxBytesVal, aggregatableValuesConfigurations: field( 'aggregatable_values', - aggregatableValuesConfigurations, + (j) => + aggregatableValuesConfigurations( + j, + ctx, + aggregatableFilteringIdMaxBytesVal + ), [] ), aggregatableDedupKeys: field(