From 192e30d6157638692055e803f7eccc84ddd5e3d5 Mon Sep 17 00:00:00 2001 From: gabe Date: Thu, 6 Apr 2023 14:51:30 -0700 Subject: [PATCH] move to v1.1.0 --- CODEOWNERS | 2 +- docs/{ => v1.0.1}/api/content.md | 0 docs/{ => v1.0.1}/api/title.md | 0 docs/{ => v1.0.1}/bitcoin.md | 0 docs/{ => v1.0.1}/contribution-guidlines.md | 0 docs/{ => v1.0.1}/core.md | 0 docs/{ => v1.0.1}/design.md | 0 docs/{ => v1.0.1}/spec/abstract.md | 0 docs/{ => v1.0.1}/spec/appendix.md | 0 docs/{ => v1.0.1}/spec/common-functions.md | 0 docs/{ => v1.0.1}/spec/context.md | 0 docs/{ => v1.0.1}/spec/did-uri.md | 0 docs/{ => v1.0.1}/spec/file-structures.md | 0 docs/{ => v1.0.1}/spec/guidelines.md | 0 docs/{ => v1.0.1}/spec/intro.md | 0 docs/{ => v1.0.1}/spec/json-web-signatures.md | 0 docs/{ => v1.0.1}/spec/method-versioning.md | 0 docs/{ => v1.0.1}/spec/operations.md | 0 docs/{ => v1.0.1}/spec/parameters.md | 0 docs/{ => v1.0.1}/spec/patches.md | 0 docs/{ => v1.0.1}/spec/pof.md | 0 docs/{ => v1.0.1}/spec/processing.md | 0 docs/{ => v1.0.1}/spec/resolution.md | 0 docs/{ => v1.0.1}/spec/terminology.md | 0 docs/{ => v1.0.1}/spec/title.md | 0 docs/{ => v1.0.1}/spec/topology.md | 0 docs/{ => v1.0.1}/spec/versioning.md | 0 docs/{ => v1.0.1}/styleguide.md | 0 docs/{ => v1.0.1}/type-registry.md | 0 docs/v1.1.0/api/content.md | 157 ++ docs/v1.1.0/api/title.md | 28 + docs/v1.1.0/bitcoin.md | 77 + docs/v1.1.0/contribution-guidlines.md | 72 + docs/v1.1.0/core.md | 752 ++++++ docs/v1.1.0/design.md | 65 + docs/v1.1.0/spec/abstract.md | 3 + docs/v1.1.0/spec/appendix.md | 80 + docs/v1.1.0/spec/common-functions.md | 39 + docs/v1.1.0/spec/context.md | 109 + docs/v1.1.0/spec/did-uri.md | 105 + docs/v1.1.0/spec/file-structures.md | 231 ++ docs/v1.1.0/spec/guidelines.md | 16 + docs/v1.1.0/spec/intro.md | 7 + docs/v1.1.0/spec/json-web-signatures.md | 73 + docs/v1.1.0/spec/method-versioning.md | 26 + docs/v1.1.0/spec/operations.md | 143 ++ docs/v1.1.0/spec/parameters.md | 27 + docs/v1.1.0/spec/patches.md | 288 +++ 
docs/v1.1.0/spec/pof.md | 33 + docs/v1.1.0/spec/processing.md | 119 + docs/v1.1.0/spec/resolution.md | 159 ++ docs/v1.1.0/spec/terminology.md | 32 + docs/v1.1.0/spec/title.md | 31 + docs/v1.1.0/spec/topology.md | 9 + docs/v1.1.0/spec/versioning.md | 37 + docs/v1.1.0/styleguide.md | 12 + docs/v1.1.0/type-registry.md | 15 + specs.json | 58 +- www/api/v1.0.1/index.html | 312 +++ www/spec/v1.0.1/index.html | 2250 +++++++++++++++++ 60 files changed, 5362 insertions(+), 5 deletions(-) rename docs/{ => v1.0.1}/api/content.md (100%) rename docs/{ => v1.0.1}/api/title.md (100%) rename docs/{ => v1.0.1}/bitcoin.md (100%) rename docs/{ => v1.0.1}/contribution-guidlines.md (100%) rename docs/{ => v1.0.1}/core.md (100%) rename docs/{ => v1.0.1}/design.md (100%) rename docs/{ => v1.0.1}/spec/abstract.md (100%) rename docs/{ => v1.0.1}/spec/appendix.md (100%) rename docs/{ => v1.0.1}/spec/common-functions.md (100%) rename docs/{ => v1.0.1}/spec/context.md (100%) rename docs/{ => v1.0.1}/spec/did-uri.md (100%) rename docs/{ => v1.0.1}/spec/file-structures.md (100%) rename docs/{ => v1.0.1}/spec/guidelines.md (100%) rename docs/{ => v1.0.1}/spec/intro.md (100%) rename docs/{ => v1.0.1}/spec/json-web-signatures.md (100%) rename docs/{ => v1.0.1}/spec/method-versioning.md (100%) rename docs/{ => v1.0.1}/spec/operations.md (100%) rename docs/{ => v1.0.1}/spec/parameters.md (100%) rename docs/{ => v1.0.1}/spec/patches.md (100%) rename docs/{ => v1.0.1}/spec/pof.md (100%) rename docs/{ => v1.0.1}/spec/processing.md (100%) rename docs/{ => v1.0.1}/spec/resolution.md (100%) rename docs/{ => v1.0.1}/spec/terminology.md (100%) rename docs/{ => v1.0.1}/spec/title.md (100%) rename docs/{ => v1.0.1}/spec/topology.md (100%) rename docs/{ => v1.0.1}/spec/versioning.md (100%) rename docs/{ => v1.0.1}/styleguide.md (100%) rename docs/{ => v1.0.1}/type-registry.md (100%) create mode 100644 docs/v1.1.0/api/content.md create mode 100644 docs/v1.1.0/api/title.md create mode 100644 
docs/v1.1.0/bitcoin.md create mode 100644 docs/v1.1.0/contribution-guidlines.md create mode 100644 docs/v1.1.0/core.md create mode 100644 docs/v1.1.0/design.md create mode 100644 docs/v1.1.0/spec/abstract.md create mode 100644 docs/v1.1.0/spec/appendix.md create mode 100644 docs/v1.1.0/spec/common-functions.md create mode 100644 docs/v1.1.0/spec/context.md create mode 100644 docs/v1.1.0/spec/did-uri.md create mode 100644 docs/v1.1.0/spec/file-structures.md create mode 100644 docs/v1.1.0/spec/guidelines.md create mode 100644 docs/v1.1.0/spec/intro.md create mode 100644 docs/v1.1.0/spec/json-web-signatures.md create mode 100644 docs/v1.1.0/spec/method-versioning.md create mode 100644 docs/v1.1.0/spec/operations.md create mode 100644 docs/v1.1.0/spec/parameters.md create mode 100644 docs/v1.1.0/spec/patches.md create mode 100644 docs/v1.1.0/spec/pof.md create mode 100644 docs/v1.1.0/spec/processing.md create mode 100644 docs/v1.1.0/spec/resolution.md create mode 100644 docs/v1.1.0/spec/terminology.md create mode 100644 docs/v1.1.0/spec/title.md create mode 100644 docs/v1.1.0/spec/topology.md create mode 100644 docs/v1.1.0/spec/versioning.md create mode 100644 docs/v1.1.0/styleguide.md create mode 100644 docs/v1.1.0/type-registry.md create mode 100644 www/api/v1.0.1/index.html create mode 100644 www/spec/v1.0.1/index.html diff --git a/CODEOWNERS b/CODEOWNERS index bfb730bd9..c36b410af 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -2,6 +2,6 @@ # the repo. Unless a later match takes precedence, # they will be requested for review when someone opens a # pull request. 
-* @thehenrytsai @tplooker @csuwildcat @troyronda @OR13 @xinaxu +* @thehenrytsai @tplooker @csuwildcat @troyronda @OR13 @xinaxu @decentralgabe # See CODEOWNERS syntax here: https://help.github.com/articles/about-codeowners/#codeowners-syntax \ No newline at end of file diff --git a/docs/api/content.md b/docs/v1.0.1/api/content.md similarity index 100% rename from docs/api/content.md rename to docs/v1.0.1/api/content.md diff --git a/docs/api/title.md b/docs/v1.0.1/api/title.md similarity index 100% rename from docs/api/title.md rename to docs/v1.0.1/api/title.md diff --git a/docs/bitcoin.md b/docs/v1.0.1/bitcoin.md similarity index 100% rename from docs/bitcoin.md rename to docs/v1.0.1/bitcoin.md diff --git a/docs/contribution-guidlines.md b/docs/v1.0.1/contribution-guidlines.md similarity index 100% rename from docs/contribution-guidlines.md rename to docs/v1.0.1/contribution-guidlines.md diff --git a/docs/core.md b/docs/v1.0.1/core.md similarity index 100% rename from docs/core.md rename to docs/v1.0.1/core.md diff --git a/docs/design.md b/docs/v1.0.1/design.md similarity index 100% rename from docs/design.md rename to docs/v1.0.1/design.md diff --git a/docs/spec/abstract.md b/docs/v1.0.1/spec/abstract.md similarity index 100% rename from docs/spec/abstract.md rename to docs/v1.0.1/spec/abstract.md diff --git a/docs/spec/appendix.md b/docs/v1.0.1/spec/appendix.md similarity index 100% rename from docs/spec/appendix.md rename to docs/v1.0.1/spec/appendix.md diff --git a/docs/spec/common-functions.md b/docs/v1.0.1/spec/common-functions.md similarity index 100% rename from docs/spec/common-functions.md rename to docs/v1.0.1/spec/common-functions.md diff --git a/docs/spec/context.md b/docs/v1.0.1/spec/context.md similarity index 100% rename from docs/spec/context.md rename to docs/v1.0.1/spec/context.md diff --git a/docs/spec/did-uri.md b/docs/v1.0.1/spec/did-uri.md similarity index 100% rename from docs/spec/did-uri.md rename to docs/v1.0.1/spec/did-uri.md diff --git 
a/docs/spec/file-structures.md b/docs/v1.0.1/spec/file-structures.md similarity index 100% rename from docs/spec/file-structures.md rename to docs/v1.0.1/spec/file-structures.md diff --git a/docs/spec/guidelines.md b/docs/v1.0.1/spec/guidelines.md similarity index 100% rename from docs/spec/guidelines.md rename to docs/v1.0.1/spec/guidelines.md diff --git a/docs/spec/intro.md b/docs/v1.0.1/spec/intro.md similarity index 100% rename from docs/spec/intro.md rename to docs/v1.0.1/spec/intro.md diff --git a/docs/spec/json-web-signatures.md b/docs/v1.0.1/spec/json-web-signatures.md similarity index 100% rename from docs/spec/json-web-signatures.md rename to docs/v1.0.1/spec/json-web-signatures.md diff --git a/docs/spec/method-versioning.md b/docs/v1.0.1/spec/method-versioning.md similarity index 100% rename from docs/spec/method-versioning.md rename to docs/v1.0.1/spec/method-versioning.md diff --git a/docs/spec/operations.md b/docs/v1.0.1/spec/operations.md similarity index 100% rename from docs/spec/operations.md rename to docs/v1.0.1/spec/operations.md diff --git a/docs/spec/parameters.md b/docs/v1.0.1/spec/parameters.md similarity index 100% rename from docs/spec/parameters.md rename to docs/v1.0.1/spec/parameters.md diff --git a/docs/spec/patches.md b/docs/v1.0.1/spec/patches.md similarity index 100% rename from docs/spec/patches.md rename to docs/v1.0.1/spec/patches.md diff --git a/docs/spec/pof.md b/docs/v1.0.1/spec/pof.md similarity index 100% rename from docs/spec/pof.md rename to docs/v1.0.1/spec/pof.md diff --git a/docs/spec/processing.md b/docs/v1.0.1/spec/processing.md similarity index 100% rename from docs/spec/processing.md rename to docs/v1.0.1/spec/processing.md diff --git a/docs/spec/resolution.md b/docs/v1.0.1/spec/resolution.md similarity index 100% rename from docs/spec/resolution.md rename to docs/v1.0.1/spec/resolution.md diff --git a/docs/spec/terminology.md b/docs/v1.0.1/spec/terminology.md similarity index 100% rename from 
docs/spec/terminology.md rename to docs/v1.0.1/spec/terminology.md diff --git a/docs/spec/title.md b/docs/v1.0.1/spec/title.md similarity index 100% rename from docs/spec/title.md rename to docs/v1.0.1/spec/title.md diff --git a/docs/spec/topology.md b/docs/v1.0.1/spec/topology.md similarity index 100% rename from docs/spec/topology.md rename to docs/v1.0.1/spec/topology.md diff --git a/docs/spec/versioning.md b/docs/v1.0.1/spec/versioning.md similarity index 100% rename from docs/spec/versioning.md rename to docs/v1.0.1/spec/versioning.md diff --git a/docs/styleguide.md b/docs/v1.0.1/styleguide.md similarity index 100% rename from docs/styleguide.md rename to docs/v1.0.1/styleguide.md diff --git a/docs/type-registry.md b/docs/v1.0.1/type-registry.md similarity index 100% rename from docs/type-registry.md rename to docs/v1.0.1/type-registry.md diff --git a/docs/v1.1.0/api/content.md b/docs/v1.1.0/api/content.md new file mode 100644 index 000000000..18d3b7f8a --- /dev/null +++ b/docs/v1.1.0/api/content.md @@ -0,0 +1,157 @@ +## REST API + +The following sections define the Sidetree resolution and operations endpoints. Please refer to the companion [Sidetree REST API](https://identity.foundation/sidetree/swagger/) specification for additional information, as well as REST API definitions for the anchoring and CAS components. + +### Sidetree Resolution + +Sidetree resolution requests to the REST API are based on the [DID Resolution HTTP(S) binding](https://w3c-ccg.github.io/did-resolution/#bindings-https). +Resolution requests consist of a DID and MAY include DID parameters. +As detailed in [Resolution](#resolution), the resolution request MAY include the initial state DID parameter. + +The server responds with the [DID Resolution Result](https://w3c-ccg.github.io/did-resolution/#did-resolution-result) composed of the DID Document and Method Metadata. +Sidetree defines `published`, `updateCommitment`, and `recoveryCommitment` method metadata. 
+ - `published` is detailed in [Published Property](#published-property). + - `updateCommitment` is the commitment for the next update operation as defined in [commitment schemes](https://identity.foundation/sidetree/spec/#commitment-schemes). + - `recoveryCommitment` is the commitment for the next recover or deactivate operation as defined in [commitment schemes](https://identity.foundation/sidetree/spec/#commitment-schemes). + +::: example +```json +{ + "@context": "https://w3id.org/did-resolution/v1", + "didDocument": DID_DOCUMENT_OBJECT, + "didDocumentMetadata": { + "method": { + "published": boolean, + "updateCommitment": UPDATE_COMMITMENT, + "recoveryCommitment": RECOVERY_COMMITMENT + } + } +} +``` +::: + +A resolution is requested as follows: + +1. The client ****MUST**** send a GET to the Sidetree resolution endpoint `/identifiers/{did-with-or-without-initial-state}` under the desired REST server path. +2. If the DID does not exist and initial state was not provided: + - The server ****MUST**** respond with HTTP Status Code 404. +3. If the DID does not exist and valid initial state was provided: + - The server ****MUST**** respond with HTTP Status Code 200. + - The server ****MUST**** include the `didDocument` property, with its value set to the initial DID document that is constructed from the initial state. + - The server ****MUST**** include the resolution response object `didDocumentMetadata` composed of a `method` object, which includes a `published` property with value `false`. +4. If the DID does exist and has not been deactivated: + - The server ****MUST**** respond with HTTP Status Code 200. + - The server ****MUST**** include the `didDocument` property, with its value set to the latest DID document. + - The server ****MUST**** include the resolution response object `didDocumentMetadata` composed of a `method` object which includes a `published` property with value `true`. +5. 
If the DID does exist and has been deactivated: + - The server ****MUST**** respond with HTTP Status Code 200. + - The server ****MUST**** include the `didDocument` property, with its value set to a valid empty DID document including the populated `id` property. + - The server ****MUST**** include the resolution response object `didDocumentMetadata` which includes a `deactivated` property with value `true`. +6. Otherwise, for failure, the server ****MUST**** respond with an appropriate HTTP Status Code (400, 401, 404, 500). + +### Sidetree Operations + +Sidetree operation requests to the REST API consist of a type property indicating the operation to be performed along with operation-specific properties and data. + +::: example +```json +{ + "type": OPERATION_TYPE, + ... +} +``` +::: + +A valid Sidetree Operation Request is a JSON document composed as follows: + +1. The Operation Request ****MUST**** contain a `type` property, and its value ****MUST**** be a valid operation defined in +[File Structure](#file-structures). The defined operations are `create`, `recover`, `deactivate`, `update`. +2. Populate additional properties according to the appropriate subsection. +3. The client ****MUST**** POST the Operation Request JSON document to the Sidetree operation endpoint `/operations` under the desired REST server path. +4. The server ****MUST**** respond with HTTP status 200 when successful. Otherwise, for failure, the server ****MUST**** respond with an appropriate HTTP Status Code (400, 401, 404, 500). + - In the case of a successful `create` operation, the server ****MUST**** return the DID Resolution Result for the DID as is detailed in [Sidetree Resolution](#sidetree-resolution). + +#### Create + +::: example +```json +{ + "type": "create", + "suffixData": SUFFIX_DATA_OBJECT, + "delta": DELTA_OBJECT +} +``` +::: + +Use the following process to generate a Sidetree create operation JSON document for the REST API, composed as follows: + +1. 
The object ****MUST**** contain a `type` property, and its value ****MUST**** be `create`. +2. The object ****MUST**** contain a `suffixData` property, and its value must be a [_Suffix Data Object_](#core-index-file-create-entry). +3. The object ****MUST**** contain a `delta` property, and its value must be a [_Create Operation Data Object_](#create-data-object). + +#### Update + +::: example +```json +{ + "type": "update", + "didSuffix": SUFFIX_STRING, + "revealValue": REVEAL_VALUE, + "delta": DELTA_OBJECT, + "signedData": JWS_SIGNED_VALUE +} +``` +::: + +Use the following process to generate a Sidetree update operation JSON document for the REST API, composed as follows: + +1. The object ****MUST**** contain a `type` property, and its value ****MUST**** be `update`. +1. The object ****MUST**** contain a `didSuffix` property, and its value ****MUST**** be the [DID Suffix](#did-suffix) of the DID the operation pertains to. +1. The object ****MUST**** contain a `revealValue` property, and its value ****MUST**** be the [reveal value](https://identity.foundation/sidetree/spec/#default-parameters) of the DID the operation pertains to. +1. The object ****MUST**** contain a `delta` property, and its value ****MUST**** be an [_Update Operation Delta Object_](#update-data-object). +1. The object ****MUST**** contain a `signedData` property, and its value ****MUST**** be an [IETF RFC 7515](https://tools.ietf.org/html/rfc7515) compliant JWS Compact +Serialization of the Update operation as defined in [Provisional Index File](https://identity.foundation/sidetree/spec/#provisional-index-file). + +#### Recover + +::: example +```json +{ + "type": "recover", + "didSuffix": SUFFIX_STRING, + "revealValue": REVEAL_VALUE, + "delta": DELTA_OBJECT, + "signedData": JWS_SIGNED_VALUE +} +``` +::: + +Use the following process to generate a Sidetree recovery operation JSON document for the REST API, composed as follows: + +1. 
The object ****MUST**** contain a `type` property, and its value ****MUST**** be `recover`. +1. The object ****MUST**** contain a `didSuffix` property, and its value ****MUST**** be the [DID Suffix](#did-suffix) of the DID the operation pertains to. +1. The object ****MUST**** contain a `revealValue` property, and its value ****MUST**** be the [reveal value](https://identity.foundation/sidetree/spec/#default-parameters) of the DID the operation pertains to. +1. The object ****MUST**** contain a `delta` property, and its value ****MUST**** be a [_Recovery Operation Delta Object_](#recover-delta-object). +1. The object ****MUST**** contain a `signedData` property, and its value ****MUST**** be an [IETF RFC 7515](https://tools.ietf.org/html/rfc7515) compliant JWS Compact +Serialization of the Recovery operation as defined in [Core Index File](https://identity.foundation/sidetree/spec/#core-index-file). + +#### Deactivate + +::: example +```json +{ + "type": "deactivate", + "didSuffix": SUFFIX_STRING, + "revealValue": REVEAL_VALUE, + "signedData": JWS_SIGNED_VALUE +} +``` +::: + +Use the following process to generate a Sidetree deactivate operation JSON document for the REST API, composed as follows: + +1. The object ****MUST**** contain a `type` property, and its value ****MUST**** be `deactivate`. +1. The object ****MUST**** contain a `didSuffix` property, and its value ****MUST**** be the [DID Suffix](#did-suffix) of the DID the operation pertains to. +1. The object ****MUST**** contain a `revealValue` property, and its value ****MUST**** be the [reveal value](https://identity.foundation/sidetree/spec/#default-parameters) of the DID the operation pertains to. +1. The object ****MUST**** contain a `signedData` property, and its value ****MUST**** be an [IETF RFC 7515](https://tools.ietf.org/html/rfc7515) compliant JWS Compact +Serialization of the Deactivate operation as defined in [Core Index File](https://identity.foundation/sidetree/spec/#core-index-file). 
diff --git a/docs/v1.1.0/api/title.md b/docs/v1.1.0/api/title.md new file mode 100644 index 000000000..ec855a838 --- /dev/null +++ b/docs/v1.1.0/api/title.md @@ -0,0 +1,28 @@ +Sidetree REST API +================== + +**Specification Status:** Editor's Draft + +**Latest published version:** + [identity.foundation/sidetree/api](https://identity.foundation/sidetree/api) + +**Editors:** +~ [Troy Ronda](https://www.linkedin.com/in/troyronda/) (SecureKey) +~ [Henry Tsai](https://www.linkedin.com/in/henry-tsai-6b884014/) (Microsoft) + +**Contributors:** +~ [Mudassir Ali](https://www.linkedin.com/in/mudassir-ali-4981654/) (Microsoft) +~ [Isaac Chen](https://www.linkedin.com/in/isaac-chen-921079127/) (Microsoft) +~ [Kyle Den Hartog](https://www.linkedin.com/in/kyledenhartog/) (Mattr) +~ [Daniel Buchner](https://www.linkedin.com/in/dbuchner/) (Microsoft) +~ [Orie Steele](https://www.linkedin.com/in/or13b/) (Transmute) + +**Participate:** +~ [GitHub repo](https://github.com/decentralized-identity/sidetree) +~ [File a bug](https://github.com/decentralized-identity/sidetree/issues) +~ [Commit history](https://github.com/decentralized-identity/sidetree/commits/master) + +**Sidetree protocol specification:** + [identity.foundation/sidetree/spec](https://identity.foundation/sidetree/spec) + +------------------------------------ \ No newline at end of file diff --git a/docs/v1.1.0/bitcoin.md b/docs/v1.1.0/bitcoin.md new file mode 100644 index 000000000..3cca37e0c --- /dev/null +++ b/docs/v1.1.0/bitcoin.md @@ -0,0 +1,77 @@ +# Bitcoin Blockchain Service Reference Implementation + + +## Value Time Lock + +### Protocol parameters + +| Protocol parameters | Description | +| ------------------------------------ | ---------------------------------------------------------| +| valueTimeLockDurationInBlocks | The duration which a value time lock is required to have | + +### Configuration parameters +* valueTimeLockUpdateEnabled + +This parameter controls whether the value time lock is 
actively being renewed and if the funds will be returned to wallet in case of `valueTimeLockAmountInBitcoins` being set to zero. When this parameter is set to `false`, parameters `valueTimeLockAmountInBitcoins`, `valueTimeLockPollPeriodInSeconds` and `valueTimeLockTransactionFeesAmountInBitcoins` will be ignored. + +* valueTimeLockAmountInBitcoins + +The desired fund locked to write larger operation batches. Setting this to 0 will cause any existing locked fund to be released back to the wallet upon lock expiry. + +* valueTimeLockPollPeriodInSeconds + +The polling duration between checks to see if the value time lock needs to be re-locked or released back to wallet. + +* valueTimeLockTransactionFeesAmountInBitcoins + +The fund allocated for transaction fees for subsequent re-locking of the initial value time lock. + +> Developer's note: +This allotted amount is locked together with value time lock for simplicity of re-lock implementation. If this allotted amount is depleted due to subsequent re-locks, the remaining locked amount will be released back to wallet, and a new lock will be created with this allotted amount added to it again. + +## Events + +### `bitcoin_databases_revert` +Occurs every time the databases are reverted due to a bitcoin reorg. + +Event data: +```json +{ + "blockHeight": "The block height that the databases are reverted to." +} +``` + +### `bitcoin_lock_monitor_lock_released` +Occurs every time the lock monitor releases a value lock. + +Event data: none + +### `bitcoin_lock_monitor_lock_renewed` +Occurs every time the lock monitor renews an existing lock. + +Event data: none + +### `bitcoin_lock_monitor_new_lock` +Occurs every time the lock monitor creates a new lock. + +Event data: none + +### `bitcoin_lock_monitor_loop_failure` +Occurs every time the lock monitor loop fails. + +Event data: none + +### `bitcoin_lock_monitor_loop_success` +Occurs every time the lock monitor loop succeeds. 
+ +Event data: none + +### `bitcoin_observing_loop_failure` +Occurs every time the bitcoin observing loop fails. + +Event data: none + +### `bitcoin_observing_loop_success` +Occurs every time the bitcoin observing loop succeeds. + +Event data: none diff --git a/docs/v1.1.0/contribution-guidlines.md b/docs/v1.1.0/contribution-guidlines.md new file mode 100644 index 000000000..9760f2845 --- /dev/null +++ b/docs/v1.1.0/contribution-guidlines.md @@ -0,0 +1,72 @@ +# Contribution Guidelines + +The following document covers the contribution guidelines for the artifacts in this repository. + +## Commit message format + +A well formed commit message communicates context about a change. A diff will tell you what changed. A well cared for +commit log is a beautiful and useful thing. + +What may be a hassle at first soon becomes habit, and eventually a source of pride and productivity for all +involved. From reviews to maintenance it's a powerful tool. Understanding why something happened months or years ago +becomes not only possible but efficient. + +We rely on consistent commit messages as we use +[conventional-changelog](https://github.com/conventional-changelog/conventional-changelog) which automatically generates +the changelog diff based on the commit messages + +We enforce well formed commit messages with pre-commit hooks using [husky](https://github.com/typicode/husky). + +The following guidelines are based on the angular +team's [contribution guide](https://github.com/angular/angular/blob/22b96b9/CONTRIBUTING.md#-commit-message-guidelines). +Checkout [commitizen](https://www.npmjs.com/package/commitizen) and [commitlint.io](https://commitlint.io/) for +assistance in how it works. + +In general the commit message **MUST** be of the following form to validate + +``` +type(scope): subject +``` + +Where the `type` must be one of the following, indicating the type of change being made by the commit. 
+ +* **build**: Changes that affect the build system or external dependencies (example scopes: gulp, broccoli, npm) +* **ci**: Changes to our CI configuration files and scripts (example scopes: Travis, Circle, BrowserStack, SauceLabs) +* **docs**: Documentation only changes +* **feat**: A new feature +* **fix**: A bug fix +* **perf**: A code change that improves performance +* **refactor**: A code change that neither fixes a bug nor adds a feature +* **style**: Changes that do not affect the meaning of the code (white-space, formatting, missing semi-colons, etc) +* **test**: Adding missing tests or correcting existing tests + +The `scope` defines what is being changed, in this repository the scope **MUST** be one of the following + +* **spec**: Changes being made to the Sidetree specification +* **ref-imp**: Changes being made to the Sidetree reference implementation + +The `subject` should be a short descriptive statement describing the nature of the change made by the commit. + +Full examples + +``` +feat(ref-imp): add fee calculation algorithm +``` + +or + +``` +fix(spec): ambiguity around update operation terminology +``` + +### Breaking changes + +When your commit features a breaking change, the commit body should feature `BREAKING CHANGE: ` so that these are noted correctly in the resulting changelog. + +### Helper script + +A helper scripts of `commit` is included in the `package.json` to aid in making well formed commit messages, when you are ready to commit changes simply run the following and follow the prompts + +``` +npm run commit +``` \ No newline at end of file diff --git a/docs/v1.1.0/core.md b/docs/v1.1.0/core.md new file mode 100644 index 000000000..ffde835d0 --- /dev/null +++ b/docs/v1.1.0/core.md @@ -0,0 +1,752 @@ +# Sidetree Core Node.js Reference Implementation Document + +This document focuses on the Node.js reference implementation of the Sidetree specification. 
+ +## Overview + +![Architecture diagram](/www/diagrams/architecture.png) + +## Node Types + +There will exist several Sidetree node configurations, which offer a variety of modes that support different features and trade-offs. The choice to run one type or another largely depends on the type of user, machine, and intent the operator has in mind. + +### Full Node + +A full node offers the largest set of features and highest resolution performance of DIDs, but also requires more significant bandwidth, hardware, storage, and system resource consumption to operate. A full node will attempt to fetch and retain all data associated with the Sidetree operations present in the target system. As such, full nodes are able to quickly resolve DID lookup requests and may feature more aggressive caching of DID state than other node configurations. + +### Light Node + +A light node is a node that retains the ability to independently resolve DIDs without relying on a trusted party or trusted assertions by other nodes, while minimizing the amount of bandwidth and data required to do so. Light nodes fetch and maintain only the minimum Sidetree data required to create an independent DID-indexed lookup table that enables just-in-time resolution of DIDs. + +> NOTE: Light node support is not yet implemented. + +## Observer + +The _Observer_ watches the target anchoring system to identify Sidetree operations, then parses the operations into data structures that can be used for efficient DID resolutions. +The _Observer_ defers heavy processing such as signature validations to the time of DID resolution. + +## Versioning +As the Sidetree protocol evolves, existing nodes executing an earlier version of the protocol need to upgrade to execute the newer version of the protocol while remaining backward compatible to processing of prior transactions and operations. 
+ +### Protocol Versioning Configuration +The implementation exposes a JSON configuration file with the following schema for specifying protocol version progressions: +```json +[ + { + "startingBlockchainTime": "An inclusive number that indicates the time this version takes effect.", + "version": "The name of the folder that contains all the code specific to this protocol version." + } +] +``` + +Protocol versioning configuration file example: +```json +[ + { + "startingBlockchainTime": 1500000, + "version": "0.4.0" + }, + { + "startingBlockchainTime": 2000000, + "version": "0.5.0" + } +] +``` + +![Versioning diagram](/www/diagrams/versioning.png) + +### Orchestration Layer +There are a number of top-level components (classes) that orchestrate the execution of multiple versions of protocol simultaneously at runtime. These components are intended to be independent from version specific changes. Since code in this orchestration layer need to be compatible with all Sidetree versions, the orchestration layer should be kept as thin as possible. + +- Version Manager - This component handles construction and fetching of implementations of Sidetree versions as needed. +- Batch Scheduler - This component schedules the writing of new operation batches. +- Observer - This component observes the incoming Sidetree transactions and processes them. +- Resolver - This component resolves a DID resolution request. 
+ +The orchestration layer cannot depend on any code that is Sidetree version specific, this means its dependencies must either be external or be part of the orchestration layer itself, such dependencies include: +- Blockchain Client +- CAS (Content Addressable Storage) Client +- MongoDB Transaction Store +- MongoDB Operation Store + +### Protocol Version Specific Components +The orchestration layer requires implementation of following interfaces per protocol version: +- `IBatchWriter` - Performs operation batching, batch writing to CAS, and transaction writing to blockchain. Used by the _Batch Scheduler_. +- `ITransactionProcessor` - Used by the _Observer_ to perform processing of a transaction written in a particular protocol version. +- `IOperationProcessor` - Used by the _Resolver_ to apply an operation written in a particular protocol version. +- `IRequestHandler` - Handles REST API requests. + + +## Core Service REST API + +### REST API HTTP Response status codes + +| HTTP status code | Description | +|------------------|------------------------------------------| +| 200 | Everything went well. | +| 400 | Bad client request. | +| 401 | Unauthenticated or unauthorized request. | +| 404 | Resource not found. | +| 500 | Server error. | + + +The Core Service REST API implements the [Sidetree REST API](https://identity.foundation/sidetree/api/), in addition it also exposes the following version API. + +### Fetch the current service versions. +Fetches the current version of the core and the dependent services. The service implementation defines the versioning scheme and its interpretation. + +Returns the service _names_ and _versions_ of the core and the dependent blockchain and CAS services. + +> NOTE: This API does **NOT** return the protocol version. This just represents the version of the current service(s) itself. + +#### Request path +``` +GET /version +``` + +#### Request headers +None. 
+ +#### Request example +``` +GET /version +``` + +#### Response body schema +```json +[ + { + "name": "A string representing the name of the service", + "version": "A string representing the version of currently running service." + }, + ... +] +``` + +#### Response example +```http +HTTP/1.1 200 OK + +[ + { + "name":"core", + "version":"0.4.1" + }, + { + "name":"bitcoin", + "version":"0.4.1" + }, + { + "name":"ipfs", + "version":"0.4.1" + } +] +``` + + + + +## Blockchain REST API +The blockchain REST API interface is used by the Core service and aims to abstract the underlying blockchain away from the main protocol logic. This allows the underlying blockchain to be replaced without affecting the core protocol logic. The interface also allows the protocol logic to be implemented in an entirely different language while interfacing with the same blockchain. + +### Get latest blockchain time +Gets the latest logical blockchain time. This API allows the Observer and Batch Writer to determine protocol version to be used. + +A _blockchain time hash_ **must not** be predictable/pre-computable, a canonical implementation would be to use the _block number_ as the time and the _block hash_ as the _time hash_. It is intentional that the concepts related to _blockchain blocks_ are hidden from the layers above. + +#### Request path +``` +GET /time +``` + +#### Request headers +None. + +#### Request body schema +None. + +#### Request example +``` +GET /time +``` + +#### Response body schema +```json +{ + "time": "The logical blockchain time.", + "hash": "The hash associated with the blockchain time." +} +``` + +#### Response body example +```json +{ + "time": 545236, + "hash": "0000000000000000002443210198839565f8d40a6b897beac8669cf7ba629051" +} +``` + + + +### Get blockchain time by hash +Gets the time identified by the time hash. + +#### Request path +``` +GET /time/ +``` + +#### Request headers +None. + +#### Request body schema +None. 
+
+#### Request example
+```
+GET /time/0000000000000000002443210198839565f8d40a6b897beac8669cf7ba629051
+```
+
+#### Response body schema
+```json
+{
+ "time": "The logical blockchain time.",
+ "hash": "The hash associated with the blockchain time, must be the same as the value given in query path."
+}
+```
+
+#### Response body example
+```json
+{
+ "time": 545236,
+ "hash": "0000000000000000002443210198839565f8d40a6b897beac8669cf7ba629051"
+}
+```
+
+
+
+### Fetch Sidetree transactions
+Fetches Sidetree transactions in chronological order.
+
+> Note: The call may not return all Sidetree transactions in one batch, in which case the caller can use the transaction number of the last transaction in the returned batch to fetch subsequent transactions.
+
+#### Request path
+```
+GET /transactions?since=&transaction-time-hash=
+```
+
+#### Request headers
+None.
+
+
+#### Request query parameters
+- `since`
+
+ Optional. A transaction number. When not given, all Sidetree transactions since inception will be returned.
+ When given, only Sidetree transactions after the specified transaction will be returned.
+
+- `transaction-time-hash`
+
+ Optional, but MUST BE given if `since` parameter is specified.
+
+ This is the hash associated with the time the transaction specified by the `since` parameter is anchored on blockchain.
+ Multiple transactions can have the same _transaction time_ and thus the same _transaction time hash_.
+
+ The _transaction time hash_ helps the blockchain layer detect block reorganizations (temporary forks); `HTTP 400 Bad Request` with `invalid_transaction_number_or_time_hash` as the `code` parameter value in a JSON body is returned on such events.
+ +#### Request example +``` +GET /transactions?since=89&transaction-time-hash=0000000000000000002443210198839565f8d40a6b897beac8669cf7ba629051 +``` + +#### Response body schema +The transactions array must always end with a complete block of data, but can start in the middle of a block if `since` query parameter is provided. +```json +{ + "moreTransactions": "True if there are more transactions beyond the returned batch. False otherwise.", + "transactions": [ + { + "transactionNumber": "A monotonically increasing number (need NOT be by 1) that identifies a Sidetree transaction.", + "transactionTime": "The logical blockchain time this transaction is anchored. Used for protocol version selection.", + "transactionTimeHash": "The hash associated with the transaction time.", + "anchorString": "The string written to the blockchain for this transaction.", + "transactionFeePaid": "A number representing the fee paid for this transaction.", + "normalizedTransactionFee": "A number representing the normalized transaction fee used for proof-of-fee calculation.", + "writer": "A string representing the writer of the transaction. Used in the value time lock calculations." + }, + ... 
+ ] +} +``` + +#### Response example +```http +HTTP/1.1 200 OK + +{ + "moreTransactions": false, + "transactions": [ + { + "transactionNumber": 89, + "transactionTime": 545236, + "transactionTimeHash": "0000000000000000002443210198839565f8d40a6b897beac8669cf7ba629051", + "anchorString": "QmWd5PH6vyRH5kMdzZRPBnf952dbR4av3Bd7B2wBqMaAcf", + "transactionFeePaid": 40000, + "normalizedTransactionFee": 100, + "writer": "0af7eccefa3aaa37421914923b4a2034ed5a0ad0" + }, + { + "transactionNumber": 100, + "transactionTime": 545236, + "transactionTimeHash": "0000000000000000002443210198839565f8d40a6b897beac8669cf7ba629051", + "anchorString": "QmbJGU4wNti6vNMGMosXaHbeMHGu9PkAUZtVBb2s2Vyq5d", + "transactionFeePaid": 600000, + "normalizedTransactionFee": 400, + "writer": "0af7eccefa3aaa37421782523b4a2034ed5a0ad0" + } + ] +} +``` + +#### Response example - Block reorganization detected + +```http +HTTP/1.1 400 Bad Request + +{ + "code": "invalid_transaction_number_or_time_hash" +} +``` + + +### Get first valid Sidetree transaction +Given a list of Sidetree transactions, returns the first transaction in the list that is valid. Returns 404 NOT FOUND if none of the given transactions are valid. This API is primarily used by the Sidetree core library to determine a transaction that can be used as a marker in time to reprocess transactions in the event of a block reorganization (temporary fork). + + +#### Request path +```http +POST /transactions/firstValid HTTP/1.1 +``` + +#### Request headers +| Name | Value | +| --------------------- | ---------------------- | +| ```Content-Type``` | ```application/json``` | + +#### Request body schema +```json +{ + "transactions": [ + { + "transactionNumber": "The transaction to be validated.", + "transactionTime": "The logical blockchain time this transaction is anchored. 
Used for protocol version selection.", + "transactionTimeHash": "The hash associated with the transaction time.", + "anchorString": "The string written to the blockchain for this transaction.", + "transactionFeePaid": "A number representing the fee paid for this transaction.", + "normalizedTransactionFee": "A number representing the normalized transaction fee used for proof-of-fee calculation.", + "writer": "A string representing the writer of the transaction. Used in the value time lock calculations." + }, + ... + ] +} +``` + +#### Request example +```http +POST /transactions/firstValid HTTP/1.1 +Content-Type: application/json + +{ + "transactions": [ + { + "transactionNumber": 19, + "transactionTime": 545236, + "transactionTimeHash": "0000000000000000002352597f8ec45c56ad19994808e982f5868c5ff6cfef2e", + "anchorString": "Qm28BKV9iiM1ZNzMsi3HbDRHDPK5U2DEhKpCYhKk83UPEg", + "transactionFeePaid": 5000, + "normalizedTransactionFee": 100, + "writer": "0af7eccefa3aaa37421914923b4a2034ed5a0ad0" + }, + { + "transactionNumber": 18, + "transactionTime": 545236, + "transactionTimeHash": "0000000000000000000054f9719ef6ca646e2503a9c5caac1c6ea95ffb4af587", + "anchorString": "Qmb2wxUwvEpspKXU4QNxwYQLGS2gfsAuAE9LPcn5LprS1nb", + "transactionFeePaid": 30, + "normalizedTransactionFee": 10, + "writer": "0af7eccefa3aaa37421782523b4a2034ed5a0ad0" + + }, + { + "transactionNumber": 16, + "transactionTime": 545200, + "transactionTimeHash": "0000000000000000000f32c84291a3305ad9e5e162d8cc363420831ecd0e2800", + "anchorString": "QmbBPdjWSdJoQGHbZDvPqHxWqqeKUdzBwMTMjJGeWyUkEzK", + "transactionFeePaid": 50000, + "normalizedTransactionFee": 150, + "writer": "0af7eccefa3aaa87421782523b4a2034ed5a0ad0" + }, + { + "transactionNumber": 12, + "transactionTime": 545003, + "transactionTimeHash": "0000000000000000001e002080595267fe034d370897b7b506d119ad29da1541", + "anchorString": "Qmss3gKdm9uU9YLx3MPRHQTcUq1CR1Xv9Zpdu7EBG9Pk9Y", + "transactionFeePaid": 1000000, + "normalizedTransactionFee": 200, + 
"writer": "0af7eccefa3aaa87421782523b4a2034e23jdad0" + }, + { + "transactionNumber": 4, + "transactionTime": 544939, + "transactionTimeHash": "00000000000000000000100158f474719e5a319933856f7f464fcc65a3cb2253", + "anchorString": "QmdcDrVPWy3ZXoZcuvFq7fDVqatks22MMqPAxDqXsZzGhy" + "transactionFeePaid": 100, + "normalizedTransactionFee": 50, + "writer": "0af7asdifa3aaa87421782523b4a2034ed5a0ad0" + } + ] +} +``` + +#### Response body schema +```json +{ + "transactionNumber": "The transaction number of the first valid transaction in the given list", + "transactionTime": "The logical blockchain time this transaction is anchored. Used for protocol version selection.", + "transactionTimeHash": "The hash associated with the transaction time.", + "anchorString": "The string written to the blockchain for this transaction.", + "transactionFeePaid": "A number representing the fee paid for this transaction.", + "normalizedTransactionFee": "A number representing the normalized transaction fee used for proof-of-fee calculation.", + "writer": "A string representing the writer of the transaction. Used in the value time lock calculations." +} +``` + +#### Response example +```http +HTTP/1.1 200 OK + +{ + "transactionNumber": 16, + "transactionTime": 545200, + "transactionTimeHash": "0000000000000000000f32c84291a3305ad9e5e162d8cc363420831ecd0e2800", + "anchorString": "QmbBPdjWSdJoQGHbZDvPqHxWqqeKUdzBwMTMjJGeWyUkEzK", + "transactionFeePaid": 50000, + "normalizedTransactionFee": 50, + "writer": "0af7eccefa3aaa87421782523b4a2034e23jdad0" +} +``` + +#### Response example - All transactions are invalid +```http +HTTP/1.1 404 NOT FOUND +``` + + +### Write a Sidetree transaction +Writes a Sidetree transaction to the underlying blockchain. 
+ +Returns `HTTP 400 Bad Request` with the following values as the `code` parameter in the JSON body: + +| Code | Description | +| ------------------------------- | ----------------------------------------------------------------------------------------------------------- | +| spending_cap_per_period_reached | if with the given fee (derived from minimumFee) this node will exceed the spending limit as configured in the parameters. | +| not_enough_balance_for_write | if the wallet configured in the parameters does not have enough balance to complete the write operation. | + +#### Request path +``` +POST /transactions +``` + +#### Request headers +| Name | Value | +| --------------------- | ---------------------- | +| ```Content-Type``` | ```application/json``` | + +#### Request body schema +```json +{ + "minimumFee": "A number representing the minimum transaction fee to be paid to write this transaction to the blockchain. The actual fee is dynamically calculated and can be higher than this amount (but not lower).", + "anchorString": "The string to be written to the blockchain for this transaction." +} +``` + +#### Request example +```http +POST /transactions HTTP/1.1 + +{ + "minimumFee": 200000, + "anchorString": "QmbJGU4wNti6vNMGMosXaHbeMHGu9PkAUZtVBb2s2Vyq5d" +} +``` + +#### Response body schema +None. + + +### Fetch normalized transaction fee for proof-of-fee calculation. +Fetches the normalized transaction fee used for proof-of-fee calculation, given the blockchain time. + +Returns `HTTP 400 Bad Request` with `blockchain_time_out_of_range` as the `code` parameter value in the JSON body if the given blockchain time is: +1. earlier than the genesis Sidetree blockchain time; or +1. later than the blockchain time of the latest block that the service has processed. + +Returns `HTTP 500 Internal Server Error` with `normalized_fee_cannot_be_computed` as the `code` parameter value in the JSON body if the server is unable to compute the normalized fee. 
+#### Request path +``` +GET /fee +``` + +#### Request path +``` +GET /fee/ +``` + +#### Request headers +None. + +#### Request example +``` +GET /fee/654321 +``` + +#### Response body schema +```json +{ + "normalizedTransactionFee": "A number representing the normalized transaction fee used for proof-of-fee calculation." +} +``` + +#### Response example +```http +HTTP/1.1 200 OK + +{ + "normalizedTransactionFee": 200000 +} +``` + +#### Response example - Blockchain time given is out of computable range. +```http +HTTP/1.1 400 Bad Request +{ + "code": "blockchain_time_out_of_range" +} +``` + +#### Response example - Error while computing the normalized fee. +```http +HTTP/1.1 500 Internal Server Error + +{ + "code": "normalized_fee_cannot_be_computed" +} +``` + +### Fetch the lock object for value-time-lock calculation. +Fetches the lock object used for value-time-lock calculation, given the lock identifier. + +Returns `HTTP 404 Not Found` with `value_time_lock_not_found` as the `code` parameter value in the JSON body if there was no lock found for the given lock identifier. + +#### Request path +``` +GET /locks/ +``` + +#### Request headers +None. + +#### Request example +``` +GET /locks/gHasdfasodf23230o0jlk23323 +``` + +#### Response body schema +```json +{ + "amountLocked": "A number representing the amount that was locked.", + "identifier": "The string representing the identifier of the lock. This is the same value which is passed in the request path.", + "lockTransactionTime": "A number representing the transaction time at which the lock became active.", + "owner": "A string representing the owner of the lock.", + "unlockTransactionTime": "A number representing the transaction time at which the lock became inactive." 
+}
+```
+
+#### Response example
+```http
+HTTP/1.1 200 OK
+
+{
+ "amountLocked": 1235696,
+ "identifier": "gHasdfasodf23230o0jlk23323",
+ "lockTransactionTime": 167520,
+ "owner": "Hhdofkeio209aanoiyyoiknadfsedsed652",
+ "unlockTransactionTime": 167530
+}
+```
+
+#### Response example - Lock not found.
+```http
+HTTP/1.1 404 Not Found
+{
+ "code": "value_time_lock_not_found"
+}
+```
+
+### Fetch the writer lock object used for batch writing.
+Fetches the currently active writer lock object written on the blockchain by the Blockchain service. This is used for batch writing.
+
+Returns `HTTP 404 Not Found` with the following values as the `code` parameter in the JSON body:
+
+| Code | Description |
+| --------------------------------- | ---------------------------------------------------------------|
+| value_time_lock_not_found | if there is no active lock on the blockchain. |
+| value_time_lock_in_pending_state | if there is a lock but is not confirmed on the blockchain yet. |
+
+#### Request path
+```
+GET /writerlock
+```
+
+#### Request headers
+None.
+
+#### Request example
+```
+GET /writerlock
+```
+
+#### Response body schema
+```json
+{
+ "amountLocked": "A number representing the amount that was locked.",
+ "identifier": "The string representing the identifier of the lock.",
+ "lockTransactionTime": "A number representing the transaction time at which the lock became active.",
+ "owner": "A string representing the owner of the lock.",
+ "unlockTransactionTime": "A number representing the transaction time at which the lock became inactive."
+}
+```
+
+#### Response example
+```http
+HTTP/1.1 200 OK
+
+{
+ "amountLocked": 1235696,
+ "identifier": "gHasdfasodf23230o0jlk23323",
+ "lockTransactionTime": 167520,
+ "owner": "Hhdofkeio209aanoiyyoiknadfsedsed652",
+ "unlockTransactionTime": 167530
+}
+```
+
+#### Response example - Lock not found.
+```http +HTTP/1.1 404 Not Found +{ + "code": "value_time_lock_not_found" +} +``` + +#### Response example - Lock not yet confirmed. +```http +HTTP/1.1 404 Not Found +{ + "code": "value_time_lock_in_pending_state" +} +``` + +### Fetch the current service version +Fetches the current version of the service. The service implementation defines the versioning scheme and its interpretation. + +Returns the service _name_ and _version_ of the blockchain service. + +#### Request path +``` +GET /version +``` + +#### Request headers +None. + +#### Request example +``` +GET /version +``` + +#### Response body schema +```json +{ + "name": "A string representing the name of the service", + "version": "A string representing the version of currently running service." +} +``` + +#### Response example +```http +HTTP/1.1 200 OK + +{ + "name": "bitcoin", + "version": "1.0.0" +} +``` + +## Core Service Events + +### `sidetree_batch_writer_loop_failure` +Occurs every time the batch writer fails a processing loop. + +Event data: +```json +{ + "code": "Error code of the failure. Dependent on blockchain service implementation." +} +``` + +Event data: none + +### `sidetree_batch_writer_loop_success` +Occurs every time the batch writer completes a processing loop. + +Event data: +```json +{ + "batchSize": "The size of the batch written." +} +``` + +### `sidetree_blockchain_time_changed` +Occurs every time the underlying blockchain time changes. + +Event data: +```json +{ + "time": "The logical blockchain time as an integer." +} +``` + +### `sidetree_download_manager_download` +Occurs every time an asynchronous content download has occurred regardless of success. + +Event data: +```json +{ + "code": "The download result code." +} +``` + +### `sidetree_observer_block_reorganization` +Occurs every time the observer detects a block reorganization. + +Event data: none + +### `sidetree_observer_loop_failure` +Occurs every time the observer fails a processing loop. 
+
+Event data: none
+
+### `sidetree_observer_loop_success`
+Occurs every time the observer completes a processing loop.
+
+Event data: none
diff --git a/docs/v1.1.0/design.md b/docs/v1.1.0/design.md
new file mode 100644
index 000000000..ac8a00925
--- /dev/null
+++ b/docs/v1.1.0/design.md
@@ -0,0 +1,65 @@
+# DEPRECATED HISTORICAL DOCUMENT - DO NOT USE
+
+OFFICIAL SIDETREE SPECIFICATION HERE: https://identity.foundation/sidetree/spec/
+
+
+## DDoS Attack & Mitigation
+
+Given that Sidetree is designed to enable operations to be performed at large volumes with cheap unit costs, DDoS is a real threat to the system.
+
+Without any mitigation strategy, malicious but specification adherent nodes can create and broadcast operation batches that are not intended for any other purpose than to force other observing nodes to process their operations in accordance with the specification.
+
+Sidetree specification defines the following mechanisms to enable scaling, while preventing DDoS attacks:
+
+#### Rate limiting
+
+ To prevent a spam attack from causing transaction and operation stores to grow at an unhealthy rate, 2 types of rate limiting are put in place.
+
+ 1. writer rate limiting
+ Each writer is allowed 1 transaction per transaction time. If a writer has more than 1 transaction, the one with the lowest transaction number is chosen to be considered.
+
+ 2. operation and transaction rate limiting
+ After getting 1 transaction per writer, transaction and operation rate limiting is applied. A cap is put on the number of operations and transactions allowed to be observed in a transaction time. The selection logic when the caps are exceeded is the following:
+
+ higher fee per transaction comes first, if transaction fee is the same, lower transaction number comes first, in that order, fill the cap and ignore the rest.
+
+ By picking the transactions with higher transaction fee, it encourages batching, while allowing small transactions to have the opportunity to also be included if they are willing to pay a slightly higher transaction fee. Alternatives to this approach are highest fee per operation and first-come-first-served, but they don't encourage batching and discourage writing near the end of a transaction time.
+
+#### Maximum batch size
+
+ By defining a maximum number of operations per batch, the strategy prevents participants from anchoring arbitrarily large trees on the system. At its core, this mitigation strategy forces the attacker to deal with the organic economic pressure exerted by the underlying anchoring system's transactional unit cost. Each instantiation of a Sidetree-based DID Method may select a different maximum batch size; the size for the default configuration is TBD.
+
+#### Proof of Fee
+
+ Each Sidetree entry on the target anchoring system is required to include a deterministic fee, based on the number of DID operations they seek to include via the anchoring system entry.
+
+#### One Operation per DID per Batch
+ Only one operation per DID per batch is allowed, to prevent DIDs from accumulating an inordinate amount of state.
+
+#### Commitment and Reveal for Operations
+ Upon DID creation, the create operation payload must include:
+ 1. The hash of a _commitment_ value for the next recover operation.
+ 1. The hash of a _commitment_ value for the next update operation.
+ https://en.wikipedia.org/wiki/Commitment_scheme
+
+ The DID owner must reproduce and reveal the correct commitment value in the subsequent operation for the operation to be considered valid. In addition, each subsequent operation must also include the hash of the new commitment value(s) for the next operation. This scheme enables efficient dismissal of counterfeit operations without needing to evaluate signatures.
+ +## Sidetree Client Guidelines +A Sidetree client manages the private keys and performs document operations on behalf of the DID owner. The Sidetree client needs to comply to the following guidelines to keep the DIDs it manages secure. + +1. The client MUST keep the operation payload once it is submitted to a Sidetree node until it is generally available and observed. If the submitted operation is not observed, the same operation payload MUST be resubmitted. Submitting a different operation payload would put the DID in risk of a _late publish_ attack which can lead to an unrecoverable DID if the original operation payload contains a recovery key rotation and the recovery key is lost. + + +## FAQs +* Why are we not checking signatures at observation time for all updates, recoveries, and deactivates? + + Because signature checks are computationally expensive, so we defer such compute until resolution time. + +* Why have the concept of _index files_? + + It would be useful to be able to fetch metadata about the batched operations without needing to download the actual batched operations. + This design is needed for the implementation of "light nodes". + +* Why assign a _transaction number_ to invalid transactions? + + This allows all Sidetree nodes to refer to the same transaction using the same transaction number regardless of its validity. diff --git a/docs/v1.1.0/spec/abstract.md b/docs/v1.1.0/spec/abstract.md new file mode 100644 index 000000000..ad157574d --- /dev/null +++ b/docs/v1.1.0/spec/abstract.md @@ -0,0 +1,3 @@ +## Abstract + +Sidetree is a protocol for creating scalable [Decentralized Identifier](https://w3c.github.io/did-core/) networks that can run atop any existing decentralized anchoring system (e.g. Bitcoin, Ethereum, distributed ledgers, witness-based approaches) and be as open, public, and permissionless as the underlying anchoring systems they utilize. 
The protocol allows users to create globally unique, user-controlled identifiers and manage their associated PKI metadata, all without the need for centralized authorities or trusted third parties. The syntax of the identifier and accompanying data model used by the protocol is conformant with the [W3C Decentralized Identifiers](https://w3c.github.io/did-core/) specification. Implementations of the protocol can be codified as their own distinct _DID Methods_ and registered in the [W3C DID Method Registry](https://w3c-ccg.github.io/did-method-registry/). \ No newline at end of file diff --git a/docs/v1.1.0/spec/appendix.md b/docs/v1.1.0/spec/appendix.md new file mode 100644 index 000000000..96339a903 --- /dev/null +++ b/docs/v1.1.0/spec/appendix.md @@ -0,0 +1,80 @@ +## Appendix + +### Test Vectors + +The Resolution test vectors are the result of applying operations and obtaining resolution results. + +- The Create Resolution is generated by applying the `Create Operation` input vector and then resolving `shortFormDid`. +- The Update Resolution is generated by applying the `Create Operation` followed by the `Update Operation` from the input vector and then resolving `shortFormDid`. +- The Recover Resolution is generated by applying the `Create Operation` followed by the `Recover Operation` from the input vector and then resolving `shortFormDid`. +- The Deactivate Resolution is generated by applying the `Create Operation` followed by the `Recover Operation` followed by the `Deactivate Operation` from the input vector and then resolving `shortFormDid`. +- The Long-Form Resolution is generated by resolving `longFormDid`. + +#### DID + +```json +[[insert: www/vectors/resolution/did.json]] +``` + +#### Operation Inputs + +The following operation inputs are in the form of Sidetree REST API Operations. 
+ +##### Create + +```json +[[insert: www/vectors/operations/createOperation.json]] +``` + +##### Update + +```json +[[insert: www/vectors/operations/updateOperation.json]] +``` + +##### Recover + +```json +[[insert: www/vectors/operations/recoverOperation.json]] +``` + +##### Deactivate + +```json +[[insert: www/vectors/operations/deactivateOperation.json]] +``` + +#### Resolution + +##### Create + +```json +[[insert: www/vectors/resolution/afterCreate.json]] +``` + +##### Update + +```json +[[insert: www/vectors/resolution/afterUpdate.json]] +``` + +##### Recover + +```json +[[insert: www/vectors/resolution/afterRecover.json]] +``` + +##### Deactivate + +```json +[[insert: www/vectors/resolution/afterDeactivate.json]] +``` + +##### Long-Form Response + +```json +[[insert: www/vectors/resolution/longFormResponseDidDocument.json]] +``` + +### Acknowledgements +Transmute received funding from the United States Department of Homeland Security's (US DHS) Silicon Valley Innovation Program to contribute to this work item under contracts 70RSAT20T00000003, and 70RSAT20T00000033. This work item does not necessarily reflect the position or the policy of the U.S. Government and no official endorsement should be inferred. DIF is not lending or implying support/affiliation with the outside entity as a part of the acknowledgement. \ No newline at end of file diff --git a/docs/v1.1.0/spec/common-functions.md b/docs/v1.1.0/spec/common-functions.md new file mode 100644 index 000000000..1268b9114 --- /dev/null +++ b/docs/v1.1.0/spec/common-functions.md @@ -0,0 +1,39 @@ +## Common Functions + +The following is a list of functional procedures that are commonly used across the protocol. These functions are defined once here and referenced throughout the specification, wherever an implementer must invoke them to comply with normative processes. 
+
+### Hashing Process
+
+All data hashed within the bounds of the protocol follow the same procedural steps, and yield a consistently encoded output. Given a data value, the following steps are used to generate a hashed output:
+
+1. Generate a hash of the data value using the [`HASH_PROTOCOL`](#hash-protocol) with the [`HASH_ALGORITHM`](#hash-algorithm).
+2. Encode the resulting output using the [`DATA_ENCODING_SCHEME`](#data-encoding-scheme).
+3. Return the encoded hashing output.
+
+Pseudo-code example using current protocol defaults:
+
+```js
+let HashingOutput = Base64URL( Multihash(DATA, 0x12) );
+```
+
+### Commitment Schemes
+
+[Commitment schemes](#commitment-scheme) are used by the Sidetree protocol in important ways to preserve the integrity of operations and assist in recovery.
+
+#### Public Key Commitment Scheme
+
+The following steps define the [commitment scheme](#commitment-scheme) for generating a [public key commitment](#public-key-commitment) from a public key.
+
+1. Encode the public key into the form of a valid [JWK](https://tools.ietf.org/html/rfc7517).
+2. Canonicalize the [JWK](https://tools.ietf.org/html/rfc7517) encoded public key using the implementation's [`JSON_CANONICALIZATION_SCHEME`](#json-canonicalization-scheme).
+3. Use the implementation's [`HASH_PROTOCOL`](#hash-protocol) to hash the canonicalized public key to generate the [`REVEAL_VALUE`](#reveal-value), then hash the resulting hash value again using the implementation's [`HASH_PROTOCOL`](#hash-protocol) to produce the [public key commitment](#public-key-commitment).
+
+For maximum forward cryptographic security, implementers ****SHOULD NOT**** re-use public keys across different commitment invocations.
+Implementers ****MUST NOT**** re-use public key JWK payloads across different commitment invocations.
+
+#### JWK Nonce
+
+Implementers ****MAY**** define the `nonce` property in the public key JWK payload.
+The `nonce` property enables the re-use of public keys across commitments without re-using the public key JWK payloads.
+If the `nonce` property is defined by the implementer, the DID Owner ****MAY**** populate the `nonce` property in the public key JWK payload.
+If the `nonce` property is populated, the value of the `nonce` property ****MUST**** be of size `NONCE_SIZE` and be encoded using Base64URL encoding.
diff --git a/docs/v1.1.0/spec/context.md b/docs/v1.1.0/spec/context.md
new file mode 100644
index 000000000..514124460
--- /dev/null
+++ b/docs/v1.1.0/spec/context.md
@@ -0,0 +1,109 @@
+## Context
+
+Per the [DID Core Spec](https://github.com/w3c/did-core) an `@context` MAY be used to represent a DID Document as Linked Data.
+
+If an `@context` is present, any properties not defined in DID Core, ****MUST**** be defined in this context, or in a DID Method specific one.
+
+For example:
+
+```json
+{
+ "@context": [
+ "https://www.w3.org/ns/did/v1",
+ "https://identity.foundation/sidetree/contexts/v1",
+ "https://example.com/method/specific.jsonld"
+ ]
+}
+```
+
+### recovery
+
+A verificationMethod used to support DID Document [Recover Operation](https://identity.foundation/sidetree/spec/#recover) verification.
+ +For Example: + +```json +{ + "@context": [ + "https://www.w3.org/ns/did/v1", + "https://identity.foundation/sidetree/contexts/v1" + ], + "recovery": [{ + "id": "did:example:123#JUvpllMEYUZ2joO59UNui_XYDqxVqiFLLAJ8klWuPBw", + "type": "EcdsaSecp256k1VerificationKey2019", + "publicKeyJwk": { + "crv": "secp256k1", + "kid": "JUvpllMEYUZ2joO59UNui_XYDqxVqiFLLAJ8klWuPBw", + "kty": "EC", + "x": "dWCvM4fTdeM0KmloF57zxtBPXTOythHPMm1HCLrdd3A", + "y": "36uMVGM7hnw-N6GnjFcihWE3SkrhMLzzLCdPMXPEXlA" + } + }] +} +``` + +### operation + +A verificationMethod used to support verification of DID Document Operations: [Create](https://identity.foundation/sidetree/spec/#create), [Update](https://identity.foundation/sidetree/spec/#update), [Deactivate](https://identity.foundation/sidetree/spec/#deactivate). + +For Example: + +```json +{ + "@context": [ + "https://www.w3.org/ns/did/v1", + "https://identity.foundation/sidetree/contexts/v1" + ], + "operation": [{ + "id": "did:example:123#JUvpllMEYUZ2joO59UNui_XYDqxVqiFLLAJ8klWuPBw", + "type": "EcdsaSecp256k1VerificationKey2019", + "publicKeyJwk": { + "crv": "secp256k1", + "kid": "JUvpllMEYUZ2joO59UNui_XYDqxVqiFLLAJ8klWuPBw", + "kty": "EC", + "x": "dWCvM4fTdeM0KmloF57zxtBPXTOythHPMm1HCLrdd3A", + "y": "36uMVGM7hnw-N6GnjFcihWE3SkrhMLzzLCdPMXPEXlA" + } + }] +} +``` + +### usage + +Deprecated. DO NOT USE. + +Was introduced to support key `ops` pre sidetree protocol spec v1. + +### publicKeyJwk + +A public key in JWK format. A JSON Web Key (JWK) is a JavaScript Object Notation (JSON) data structure that represents a cryptographic key. Read [RFC7517](https://tools.ietf.org/html/rfc7517). 
+ +Example: + +```json +{ + "id": "did:example:123#JUvpllMEYUZ2joO59UNui_XYDqxVqiFLLAJ8klWuPBw", + "type": "EcdsaSecp256k1VerificationKey2019", + "publicKeyJwk": { + "crv": "secp256k1", + "kid": "JUvpllMEYUZ2joO59UNui_XYDqxVqiFLLAJ8klWuPBw", + "kty": "EC", + "x": "dWCvM4fTdeM0KmloF57zxtBPXTOythHPMm1HCLrdd3A", + "y": "36uMVGM7hnw-N6GnjFcihWE3SkrhMLzzLCdPMXPEXlA" + } +} +``` + +### publicKeyHex + +A hex encoded compressed public key. + +Example: + +```json +{ + "id": "did:example:123#JUvpllMEYUZ2joO59UNui_XYDqxVqiFLLAJ8klWuPBw", + "type": "EcdsaSecp256k1VerificationKey2019", + "publicKeyHex": "027560af3387d375e3342a6968179ef3c6d04f5d33b2b611cf326d4708badd7770" +} +``` diff --git a/docs/v1.1.0/spec/did-uri.md b/docs/v1.1.0/spec/did-uri.md new file mode 100644 index 000000000..d3d90053d --- /dev/null +++ b/docs/v1.1.0/spec/did-uri.md @@ -0,0 +1,105 @@ +## DID URI Composition + +DID Methods based on the Sidetree protocol all share the same identifier format. The unique identifier segment of a Sidetree-based DID, known as the [DID Suffix](#did-suffix), is derived based on the initial state of the DID's state data. The [DID Suffix](#did-suffix) is cryptographically bound to the initial PKI state of the DID, which means Sidetree DIDs are _self-certifying_. As a result, a person or entity who creates a Sidetree-based DID knows their unique identifier at the moment of generation, and it is cryptographic secured for instant use (for more on the instant use capabilities of Sidetree DIDs, see [Unpublished DID Resolution](#unpublished-did-resolution)). + +To generate the [_Short-Form DID URI_](#short-form-did){id="short-form-did"} of a Sidetree DID, use the [Hashing Process](#hashing-process) to generate a hash of the [canonicalized](#json-canonicalization-scheme) [_Create Operation Suffix Data Object_](#create-suffix-data-object). 
The following is an example of a resulting colon (`:`) separated DID URI composed of the URI scheme (`did:`), Method identifier (`sidetree:`), and unique identifier string (`EiBJz4...`): + +Format of Short-form DID URI: + +```html +did:METHOD: +``` + +Example of Short-Form DID URI: + +```javascript +did:sidetree:EiDahaOGH-liLLdDtTxEAdc8i-cfCz-WUcQdRJheMVNn3A +``` + +An implementer ****MAY**** define additional components in their method's DID URI composition. + +::: note +Many implementations have multiple active network instances of their DID Method (e.g. mainnet and testnet). How different network instances of a DID Method are represented in the DID URI string is method-specific. Many methods choose to use the base format above (`did:METHOD`) as their primary/mainnet network, and add an additional segment after the `:METHOD` segment to denote other network instances, for example: `did:METHOD:testnet`. DID Methods ****SHOULD**** clearly describe parsing rules for distinguishing between their different network instances. +::: + +### Long-Form DID URIs + +In many DID Methods, there is a period of time (which may be indefinite) +between the generation of a DID and the DID operation being anchored, +propagated, and processed in the underlying anchoring and storage +systems. In order to account for this, Sidetree introduces an equivalent +variant of Sidetree-based DIDs that is _self-certifying_ and _self-resolving_, +known as the [_Long-Form DID URI_](#long-form-did){id="long-form-did"}. +The [_Long-Form DID URI_](#long-form-did) variant of Sidetree-based DIDs +enables DIDs to be immediately resolvable after generation by including +the DID's initial state data within the [_Long-Form DID URI_](#long-form-did) +itself. Sidetree [_Long-Form DID URIs_](#long-form-did){id="long-form-did"} +are the [_Short-Form DID URI_](#short-form-did) with an additional +colon-separated (`:`) segment appended to the end. 
The value of this final +URI segment is a canonicalized JSON data payload composed of the +[_Create Operation Suffix_](#create-suffix-data-object) data and the +[_Create Operation Delta_](#create-delta-object) data, encoded +via the implementation's [`DATA_ENCODING_SCHEME`](#data-encoding-scheme). + +Long-form DID JSON data payload: + +```json +{ + "delta": { + "patches": [ + { + "action": "replace", + "document": { + "publicKeys": [ + { + "id": "anySigningKeyId", + "publicKeyJwk": { + "crv": "secp256k1", + "kty": "EC", + "x": "H61vqAm_-TC3OrFSqPrEfSfg422NR8QHPqr0mLx64DM", + "y": "s0WnWY87JriBjbyoY3FdUmifK7JJRLR65GtPthXeyuc" + }, + "purposes": [ + "auth" + ], + "type": "EcdsaSecp256k1VerificationKey2019" + } + ], + "services": [ + { + "id": "anyServiceEndpointId", + "type": "anyType", + "serviceEndpoint": "http://any.endpoint" + } + ] + } + } + ], + "updateCommitment": "EiBMWE2JFaFipPdthcFiQek-SXTMi5IWIFXAN8hKFCyLJw" + }, + "suffixData": { + "deltaHash": "EiBP6gAOxx3YOL8PZPZG3medFgdqWSDayVX3u1W2f-IPEQ", + "recoveryCommitment": "EiBg8oqvU0Zq_H5BoqmWf0IrhetQ91wXc5fDPpIjB9wW5w" + } +} +``` + +Format of Long-Form DID URI: + +```html +did:METHOD:: +``` + +Example of Long-Form DID URI: + +```javascript 
+did:sidetree:EiDahaOGH-liLLdDtTxEAdc8i-cfCz-WUcQdRJheMVNn3A:eyJkZWx0YSI6eyJwYXRjaGVzIjpbeyJhY3Rpb24iOiJyZXBsYWNlIiwiZG9jdW1lbnQiOnsicHVibGljX2tleXMiOlt7ImlkIjoiYW55U2lnbmluZ0tleUlkIiwiandrIjp7ImNydiI6InNlY3AyNTZrMSIsImt0eSI6IkVDIiwieCI6Ikg2MXZxQW1fLVRDM09yRlNxUHJFZlNmZzQyMk5SOFFIUHFyMG1MeDY0RE0iLCJ5IjoiczBXbldZODdKcmlCamJ5b1kzRmRVbWlmSzdKSlJMUjY1R3RQdGhYZXl1YyJ9LCJwdXJwb3NlIjpbImF1dGgiXSwidHlwZSI6IkVjZHNhU2VjcDI1NmsxVmVyaWZpY2F0aW9uS2V5MjAxOSJ9XSwic2VydmljZV9lbmRwb2ludHMiOlt7ImVuZHBvaW50IjoiaHR0cDovL2FueS5lbmRwb2ludCIsImlkIjoiYW55U2VydmljZUVuZHBvaW50SWQiLCJ0eXBlIjoiYW55VHlwZSJ9XX19XSwidXBkYXRlX2NvbW1pdG1lbnQiOiJFaUJNV0UySkZhRmlwUGR0aGNGaVFlay1TWFRNaTVJV0lGWEFOOGhLRkN5TEp3In0sInN1ZmZpeF9kYXRhIjp7ImRlbHRhX2hhc2giOiJFaUJQNmdBT3h4M1lPTDhQWlBaRzNtZWRGZ2RxV1NEYXlWWDN1MVcyZi1JUEVRIiwicmVjb3ZlcnlfY29tbWl0bWVudCI6IkVpQmc4b3F2VTBacV9INUJvcW1XZjBJcmhldFE5MXdYYzVmRFBwSWpCOXdXNXcifX0 +``` + +The [_Long-Form DID URI_](#long-form-did) variant of Sidetree-based DIDs supports the following features and usage patterns: + +- Resolving the DID Documents of unpublished DIDs. +- Authenticating with unpublished DIDs. +- Signing and verifying credentials signed against unpublished DIDs. +- After publication and propagation are complete, authenticating with either the [_Short-Form DID URI_](#short-form-did) or [_Long-Form DID URI_](#long-form-did). +- After publication and propagation are complete, signing and verifying credentials signed against either the [_Short-Form DID URI_](#short-form-did) or [_Long-Form DID URI_](#long-form-did). 
\ No newline at end of file diff --git a/docs/v1.1.0/spec/file-structures.md b/docs/v1.1.0/spec/file-structures.md new file mode 100644 index 000000000..615d846e4 --- /dev/null +++ b/docs/v1.1.0/spec/file-structures.md @@ -0,0 +1,231 @@ + +## File Structures + +The protocol defines the following three file structures, which house DID operation data and are designed to support key functionality to enable light node configurations, minimize permanently retained data, and ensure performant resolution of DIDs. + + + +### Core Index File + +Core Index Files contain [Create](#create), [Recover](#recover), and [Deactivate](#deactivate) operation values, as well as a CAS URI for the related [Provisional Index File](#provisional-index-file) (detailed below). As the name suggests, Core Index Files are anchored to the target anchoring system via embedding a CAS URI in the anchoring system's transactional history. + +::: example +```json +{ + "coreProofFileUri": CAS_URI, + "provisionalIndexFileUri": CAS_URI, + "writerLockId": OPTIONAL_LOCKING_VALUE, + "operations": { + "create": [ + { + "suffixData": { + "type": TYPE_STRING, + "deltaHash": DELTA_HASH, + "recoveryCommitment": COMMITMENT_HASH + } + }, + {...} + ], + "recover": [ + { + "didSuffix": SUFFIX_STRING, + "revealValue": MULTIHASH_OF_JWK + }, + {...} + ], + "deactivate": [ + { + "didSuffix": SUFFIX_STRING, + "revealValue": MULTIHASH_OF_JWK + }, + {...} + ] + } +} +``` +::: + +A valid [Core Index File](#core-index-file) is a JSON document that ****MUST NOT**** exceed the [`MAX_CORE_INDEX_FILE_SIZE`](#max-core-index-file-size). Any unknown properties in this file not defined by this specification or specifically permitted by the implementer, ****MUST**** result in an invalidation of the entire file. + +The [Core Index File](#core-index-file) JSON document is composed as follows: + +1. 
The [Core Index File](#core-index-file) ****MUST**** contain a [`provisionalIndexFileUri`](#provisional-index-file-uri){id="provisional-index-file-uri"} property if the batch of transactions being anchored contains any Create, Recovery, or Update operations, and its value ****MUST**** be a _CAS URI_ for the related Provisional Index File. If the batch of transactions being anchored is only comprised of Deactivate operations, the [`provisionalIndexFileUri`](#provisional-index-file-property) property ****MUST NOT**** be present. +2. The [Core Index File](#core-index-file) ****MUST**** contain a [`coreProofFileUri`](#core-proof-file-uri){id="core-proof-file-uri"} property if the batch of transactions being anchored contains any Recovery or Deactivate operations, and its value ****MUST**** be a _CAS URI_ for the related [Core Proof File](#core-proof-file). +4. The [Core Index File](#core-index-file) ****MAY**** contain a [`writerLockId`](#writer-lock-property){id="writer-lock-property"} if the implementation chooses to implement an mechanism that requires embedded anchoring information, and if present, its value ****MUST**** comply with the specifications of the implementation. +5. If the set of operations to be anchored contain any [Create](#create), [Recover](#recover), or [Deactivate](#deactivate) operations, the [Core Index File](#core-index-file) ****MUST**** contain an `operations` property, and its value ****MUST**** be an object, composed as follows: + - If there are any [Create](#create) operations to be included in the Core Index File: + 1. The `operations` object ****MUST**** include a `create` property, and its value ****MUST**** be an array. + 2. 
For each [Create](#create) operation to be included in the `create` array, herein referred to as [_Core Index File Create Entries_](#core-index-file-create-entry){id="core-index-file-create-entry"}, use the following process to compose and include a JSON object for each entry: + - Each object must contain a `suffixData` property, and its value ****MUST**** be a [_Create Operation Suffix Data Object_](#create-suffix-data-object). + 3. The [Core Index File](#core-index-file) ****MUST NOT**** include multiple [Create](#create) operations that produce the same [DID Suffix](#did-suffix). + - If there are any [Recovery](#recover) operations to be included in the Core Index File: + 1. The `operations` object ****MUST**** include a `recover` property, and its value ****MUST**** be an array. + 2. For each [Recovery](#recover) operation to be included in the `recover` array, herein referred to as [_Core Index File Recovery Entries_](#core-index-file-recovery-entry){id="core-index-file-recovery-entry"}, use the following process to compose and include entries: + - The object ****MUST**** contain a `didSuffix` property, and its value ****MUST**** be the [DID Suffix](#did-suffix) of the DID the operation pertains to. An [Core Index File](#core-index-file) ****MUST NOT**** contain more than one operation of any type with the same [DID Suffix](#did-suffix). + - The object ****MUST**** contain a `revealValue` property, and its value ****MUST**** be the [`REVEAL_VALUE`](#reveal-value) of the last update commitment. + - If there are any [Deactivate](#deactivate) operations to be included in the Core Index File: + 1. The `operations` object ****MUST**** include a `deactivate` property, and its value ****MUST**** be an array. + 2. 
For each [Deactivate](#deactivate) operation to be included in the `deactivate` array, use the following process to compose and include entries: + - The object ****MUST**** contain a `didSuffix` property, and its value ****MUST**** be the [DID Suffix](#did-suffix) of the DID the operation pertains to. An [Core Index File](#core-index-file) ****MUST NOT**** contain more than one operation of any type with the same [DID Suffix](#did-suffix). + - The object ****MUST**** contain a `revealValue` property, and its value ****MUST**** be the [`REVEAL_VALUE`](#reveal-value) of the last update commitment. + +### Provisional Index File + +Provisional Index Files contain [Update](#update) operation proving data, as well as CAS URI links to [Chunk Files](#chunk-files). + +::: example +```json +{ + "provisionalProofFileUri": CAS_URI, + "chunks": [ + { "chunkFileUri": CAS_URI }, + {...} + ], + "operations": { + "update": [ + { + "didSuffix": SUFFIX_STRING, + "revealValue": MULTIHASH_OF_JWK + }, + {...} + ] + } +} +``` +::: + +A valid [Provisional Index File](#provisional-index-file) is a JSON document that ****MUST NOT**** exceed the [`MAX_PROVISIONAL_INDEX_FILE_SIZE`](#max-provisional-index-file-size). Any unknown properties in this file not defined by this specification or specifically permitted by the implementer, ****MUST**** result in an invalidation of the entire file. + +The [Provisional Index File](#provisional-index-file) JSON document is composed as follows: + +1. The [Provisional Index File](#provisional-index-file) ****MUST**** contain a [`provisionalProofFileUri`](#provisional-proof-file-uri){id="provisional-proof-file-uri"} property if the batch of transactions being anchored contains any Update operations, and its value ****MUST**** be a _CAS URI_ for the related [Provisional Proof File](#provisional-proof-file). +2. 
The [Provisional Index File](#provisional-index-file) ****MUST**** contain a `chunks` property, and its value ****MUST**** be an array of _Chunk Entries_ for the related delta data for a given chunk of operations in the batch. Future versions of the protocol will specify a process for separating the operations in a batch into multiple _Chunk Entries_, but for this version of the protocol there ****MUST**** be only one _Chunk Entry_ present in the array. _Chunk Entry_ objects are composed as follows: + 1. The _Chunk Entry_ object ****MUST**** contain a [`chunkFileUri`](#chunk-file-uri) property, and its value ****MUST**** be a URI representing the corresponding CAS file entry, generated via the [`CAS_URI_ALGORITHM`](#cas-uri-algorithm). +3. If there are any operation entries to be included in the [Provisional Index File](#provisional-index-file) (currently only Update operations), the [Provisional Index File](#provisional-index-file) ****MUST**** include an `operations` property, and its value ****MUST**** be an object composed as follows: + - If there are any [Update](#update) entries to be included: + 1. The `operations` object ****MUST**** include an `update` property, and its value ****MUST**** be an array. + 2. For each [Update](#update) operation to be included in the `update` array, herein referred to as [Provisional Index File Update Entries](#provisional-index-file-update-entry){id="provisional-index-file-update-entry"}, use the following process to compose and include entries: + - The object ****MUST**** contain an `didSuffix` property, and its value ****MUST**** be the [DID Suffix](#did-suffix) of the DID the operation pertains to, with a maximum length as specified by the [`MAX_OPERATION_HASH_LENGTH`](#max-operation-hash-length). 
+ - The object ****MUST**** contain a `revealValue` property, and its value ****MUST**** be the [`REVEAL_VALUE`](#reveal-value) of the last update commitment, with a maximum length as specified by the [`MAX_OPERATION_HASH_LENGTH`](#max-operation-hash-length). + +### Core Proof File + +Core Proof Files are [compressed](#compression-algorithm) JSON Documents containing the cryptographic proofs (signatures, hashes, etc.) that form the signature-chained backbone for the state lineages of all DIDs in the system. The cryptographic proofs present in Core Proof Files also link a given operation to its verbose state data, which resides in an related [Chunk File](#chunk-files). + +::: example Core Proof File +```json +{ + "operations": { + "recover": [ + { + "signedData": { + "protected": {...}, + "payload": { + "recoveryCommitment": COMMITMENT_HASH, + "recoveryKey": JWK_OBJECT, + "deltaHash": DELTA_HASH + }, + "signature": SIGNATURE_STRING + } + }, + {...} + ], + "deactivate": [ + { + "signedData": { + "protected": {...}, + "payload": { + "didSuffix": SUFFIX_STRING, + "recoveryKey": JWK_OBJECT + }, + "signature": SIGNATURE_STRING + } + }, + {...} + ] + } +} +``` +::: + +Any unknown properties in this file not defined by this specification or specifically permitted by the implementer, ****MUST**** result in an invalidation of the entire file. + +In this version of the protocol, [Core Proof Files](#core-proof-file) are constructed as follows: + +1. The Core Proof File ****MUST**** include an `operations` property, and its value ****MUST**** be an object containing cryptographic proof entries for any Recovery and Deactivate operations to be included in a batch. Include the Proof Entries as follows: + - If there are any [Recovery](#recover) entries to be included: + 1. The `operations` object ****MUST**** include a `recover` property, and its value ****MUST**** be an array. + 2. 
For each [Recovery](#recover) entry to be included in the `recover` array, herein referred to as the [_Core Proof File Recovery Entry_](#core-proof-file-recovery-entry), include the operation's [_Recovery Operation Signed Data Object_](#recovery-signed-data-object) in the same index position of the operation's matching [_Core Index File Create Entry_](#core-index-file-create-entry). + - If there are any [Deactivate](#deactivate) entries to be included: + 1. The `operations` object ****MUST**** include a `deactivate` property, and its value ****MUST**** be an array. + 2. For each [Deactivate](#deactivate) entry to be included in the `deactivate` array, herein referred to as the [_Core Proof File Deactivate Entry_](#core-proof-file-deactivate-entry), include the operation's [_Deactivate Operation Signed Data Object_](#deactivate-signed-data-object) in the same index position of the operation's matching [_Core Index File Deactivate Entry_](#core-index-file-deactivate-entry). + +### Provisional Proof File + +Provisional Proof Files are [compressed](#compression-algorithm) JSON Documents containing the cryptographic proofs (signatures, hashes, etc.) for all the (eventually) prunable DID operations in the system. The cryptographic proofs present in Provisional Proof Files also link a given operation to its verbose state data, which resides in an related [Chunk File](#chunk-files). + +::: example Provisional Proof File +```json +{ + "operations": { + "update": [ + { + "signedData": { + "protected": {...}, + "payload": { + "updateKey": JWK_OBJECT, + "deltaHash": DELTA_HASH + }, + "signature": SIGNATURE_STRING + } + }, + {...} + ] + } +} +``` +::: + +Any unknown properties in this file not defined by this specification or specifically permitted by the implementer, ****MUST**** result in an invalidation of the entire file. + +In this version of the protocol, [Provisional Proof Files](#provisional-proof-file) are constructed as follows: + +1. 
The Provisional Proof File ****MUST**** include an `operations` property, and its value ****MUST**** be an object containing cryptographic proof entries for any Update operations to be included in a batch. Include the Proof Entries as follows: + - If there are any [Update](#update) entries to be included: + 1. The `operations` object ****MUST**** include an `update` property, and its value ****MUST**** be an array. + 2. For each [Update](#update) entry to be included in the `update` array, herein referred to as the [_Provisional Proof File Update Entry_](#provisional-proof-file-update-entry), include the operation's [_Update Operation Signed Data Object_](#update-signed-data-object) in the same index position of the operation's matching [_Provisional Index File Update Entry_](#provisional-index-file-update-entry). + +### Chunk Files + +Chunk Files are JSON Documents, compressed via the [COMPRESSION_ALGORITHM](#compression-algorithm), that contain Sidetree Operation source data, which are composed of delta-based CRDT entries that modify the state of a Sidetree identifier's DID state. + +For this version of the protocol, there will only exist a single Chunk File that contains all the state modifying data for all operations in the included set. Future versions of the protocol will separate the total set of included operations into multiple chunks, each with their own Chunk File. + +::: example Create operation Chunk File entry +```json +{ + "deltas": [ + + { + "patches": PATCH_ARRAY, + "updateCommitment": COMMITMENT_HASH + }, + ... + ] +} +``` +::: + +Any unknown properties in this file not defined by this specification or specifically permitted by the implementer, ****MUST**** result in an invalidation of the entire file. + +In this version of the protocol, [Chunk Files](#chunk-files) are constructed as follows: + +1. 
The Chunk File ****MUST**** include a `deltas` property, and its value ****MUST**** be an array containing [_Chunk File Delta Entry_](#chunk-file-delta-entry){id="chunk-file-delta-entry"} objects. +2. Each [_Chunk File Delta Entry_](#chunk-file-delta-entry) ****MUST**** be a JSON object serialized via the [`JSON_CANONICALIZATION_SCHEME`](#json-canonicalization-scheme), assembled as follows: + 1. The object ****MUST**** contain a `patches` property, and its value ****MUST**** be an array of [DID State Patches](#did-state-patches). + 2. The payload ****MUST**** contain an `updateCommitment` property, and its value ****MUST**** be the next _Update Commitment_ generated during the operation process associated with the type of operation being performed. + +3. Each [_Chunk File Delta Entry_](#chunk-file-delta-entry) ****MUST**** be appended to the `deltas` array as follows, in this order: + 1. If any Create operations were present in the associated Core Index File, append all [_Create Operation Delta Objects_](#create-delta-object) in the same index order as their matching [_Core Index File Create Entry_](#core-index-file-create-entry). + 2. If any Recovery operations were present in the associated Core Index File, append all [_Recovery Operation Delta Objects_](#recovery-delta-object) in the same index order as their matching [_Core Index File Recovery Entry_](#core-index-file-recovery-entry). + 3. If any Update operations were present in the associated Provisional Index File, append all [_Update Operation Delta Objects_](#update-delta-object) in the same index order as their matching [_Provisional Index File Update Entry_](#provisional-index-file-update-entry). 
diff --git a/docs/v1.1.0/spec/guidelines.md b/docs/v1.1.0/spec/guidelines.md new file mode 100644 index 000000000..f8b3801d4 --- /dev/null +++ b/docs/v1.1.0/spec/guidelines.md @@ -0,0 +1,16 @@ + +## Method & Client Guidelines + +The following are advisements and best practices for DID Method and Client (SDK, wallets, etc.) implementers that interact with Sidetree-based DID Methods. These guidelines should be carefully considered when implementing or interacting with a Sidetree-based DID Method. + +### Sidetree Operations + +A Sidetree client manages keys and performs document operations on behalf of a DID owner. The Sidetree client needs to comply to the following guidelines to securely, successfully manage a user's DIDs: + +1. The client ****MUST**** keep the operation payload once it is submitted to a Sidetree node until it is generally available and observed. If the submitted operation is not anchored and propagated, for whatever reason, the same operation payload ****MUST**** be resubmitted. Submitting a different operation payload can put the DID at risk of late publish branching, which can lead to an unrecoverable DID if the original operation payload contains a recovery key rotation and that recovery key is lost. While this is a fringe possible issue, it's best to just retain these small operation payloads. + +2. Another reason to retain operation payloads is to always have them available in the case you want to serve them across the backing Content Addressable Storage network. Most users won't elect to do this, but advanced wallets and users who seek maximum independence from any reliance on the persistence of their operations in the network may want to. + +### Update vs Recovery Keys + +It is advised that clients managing DIDs try as best as possible to separate the concepts of Update and Recovery keys. 
Compromise or loss of Update keys does not permanently imperil a user's control over their DID, where a loss or compromise of a Recovery key will. As such, it is important to create appropriate protections and processes for securing and using each type of key, commensurate with their level of control and risk. \ No newline at end of file diff --git a/docs/v1.1.0/spec/intro.md b/docs/v1.1.0/spec/intro.md new file mode 100644 index 000000000..22bea074f --- /dev/null +++ b/docs/v1.1.0/spec/intro.md @@ -0,0 +1,7 @@ +## Introduction + +_This section is non-normative_ + +Decentralized ledgers (e.g. Bitcoin) introduced the first-ever solution to the chronological oracle problem, which unlocked the ability to create robust decentralized identifier networks. However, current approaches that utilize event anchoring systems to create decentralized identifier networks suffer from severely limited transactional volumes and other performance issues. Sidetree is a 'Layer 2' protocol that can be implemented atop any form of event anchoring system to enable scalable [W3C _Decentralized Identifier_](https://w3c.github.io/did-core/) (DID) implementations that can be fully open, public, and permissionless. Sidetree is able to do all this without requiring trusted intermediaries, centralized authorities, special protocol tokens, or secondary consensus mechanisms, while preserving the core attributes of decentralization and immutability of the underlying anchoring systems it is implemented on. + +Architecturally, Sidetree-based DID Method implementations are overlay networks composed of independent peer nodes (_Sidetree nodes_) that interact with an underlying decentralized anchoring system (as illustrated under [Network Topology](#network-topology)) to write, observe, and process replicated DID PKI state operations using deterministic protocol rules that produce an eventually strongly consistent view of all DIDs in the network. 
The Sidetree protocol defines a core set of DID PKI state change _operations_, structured as delta-based Conflict-Free Replicated Data Types (i.e. [Create](#create), [Update](#update), [Recover](#recover), or [Deactivate](#deactivate)), that mutate a Decentralized Identifier's _DID Document_ state. _Sidetree nodes_ that participate in writing operations into the overlay network do so by anchoring _Content-Addressable Storage (CAS)_ (e.g. IPFS) references to aggregated bundles of _operations_ in an underlying anchoring system. The anchoring system acts as a linear chronological sequencing oracle, which the protocol leverages to order DID PKI operations in an immutable history all observing nodes can replay and validate. It is this ability to replay the precise sequence of DID PKI state change events, and process those events using a common set of deterministic rules, that allows _Sidetree nodes_ to achieve a consistent view of DIDs and their _DID Document_ states, without requiring any additional consensus mechanism. \ No newline at end of file diff --git a/docs/v1.1.0/spec/json-web-signatures.md b/docs/v1.1.0/spec/json-web-signatures.md new file mode 100644 index 000000000..4f2527541 --- /dev/null +++ b/docs/v1.1.0/spec/json-web-signatures.md @@ -0,0 +1,73 @@ +## JSON Web Signatures + +Sidetree relies on JSON Web Signatures for authentication and integrity protection of [DID Operations](https://identity.foundation/sidetree/spec/#did-operations), except for Create, which contains key material and is self-certifying. + +### Signing + +In addition to [RFC7515](https://tools.ietf.org/html/rfc7515), the following additional requirements MUST be observed by Sidetree Method implementers. + +1. `kid` MAY be present in the protected header. +2. `alg` MUST be present in the protected header, its value MUST NOT be `none`. +3. No additional members may be present in the protected header. 
+ +Here is an example of a decoded JWS header: + +```json +{ + "kid": "did:example:123#_Qq0UL2Fq651Q0Fjd6TvnYE-faHiOpRlPVQcY_-tA4A", + "alg": "EdDSA" +} +``` + +::: warning + It is recommended that `kid` be a DID URL. If it is not, method implementers might need to rely on additional context to uniquely identify the correct verificationMethod. +::: + +### Verifying + +Regardless of which verification relationship a verificationMethod is associated with, the process of verifying a JWS linked to a DID is the same. + +The JWS header is parsed and a `kid` is extracted. + +1. Iterate the verificationMethods, until a verificationMethod with `id` equal to `kid` is found. +2. Convert the discovered verificationMethod to JWK if necessary. +3. Perform [JWS Verification](https://tools.ietf.org/html/rfc7515#section-5.2) using the JWK. + +#### Operation Verification + +Sidetree operations are considered valid when the JWS can be verified with the correct key pair designated for the type of operation being invoked (i.e. `update`, `recover`, `deactivate`). + +An [Update Operation](https://identity.foundation/sidetree/spec/#update) MUST be signed by the currently valid [Update Key Pair](#update-key-pair). + +A [Recover Operation](https://identity.foundation/sidetree/spec/#recover) MUST be signed by the currently valid [Recovery Key Pair](#recovery-key-pair). + +A [Deactivate Operation](https://identity.foundation/sidetree/spec/#deactivate) MUST be signed by the currently valid [Recovery Key Pair](#recovery-key-pair). + +::: warning + Signatures on operations may be valid, but operations may be deemed invalid for other reasons (e.g. malformed delta payload or being stale). +::: + +::: warning + It is not recommended to reuse verificationMethods for multiple verification relationships. +::: + +### Operation Anchoring Time Ranges + +A Sidetree-based DID Method ****MAY**** define the `anchorFrom` and/or `anchorUntil` properties as part of the operation’s data object payload. 
+If `anchorFrom` is defined by the implementer, a DID owner ****MAY**** include the earliest allowed anchoring time for their operation in the `anchorFrom` property of the operation’s data object payload. +The `anchorFrom` property is conceptually similar to the [RFC7519](https://tools.ietf.org/html/rfc7519) `nbf` and `iat` claims. +If `anchorUntil` is defined by the implementer, a DID owner ****MAY**** include the latest allowed anchoring time for their operation in the `anchorUntil` property of the operation’s data object payload. +The `anchorUntil` property is conceptually similar to the [RFC7519](https://tools.ietf.org/html/rfc7519) `exp` claim. +These properties contain numeric values; but note that anchoring systems may have differing mechanisms of time (as defined by the method). + +A Sidetree-based DID Method ****MAY**** require validation for rejecting stale operations. +An operation is considered stale relative to the timing information provided by the underlying anchoring system. +When an operation is stale according to the DID method’s parameters, the operation is deemed as invalid. +During processing, if the DID method validates stale operations, the DID owner's operation time range is compared to the anchoring system’s timing information. +Operations that are anchored prior to `anchorFrom` are deemed invalid, if `anchorFrom` is set. +Operations that are anchored after `anchorUntil` are deemed invalid, if `anchorUntil` is set (or implicitly defined). +If the operation is deemed invalid, skip the entry and iterate forward to the next entry. + +A Sidetree-based DID Method ****MAY**** constrain the range between `anchorFrom` and `anchorUntil` using a delta defined by the implementation. +The implementer ****MAY**** also implicitly define the `anchorUntil` using the `anchorFrom` plus a delta defined by the implementation. +The delta ****MAY**** be defined as the `MAX_OPERATION_TIME_DELTA` protocol parameter. 
diff --git a/docs/v1.1.0/spec/method-versioning.md b/docs/v1.1.0/spec/method-versioning.md new file mode 100644 index 000000000..32524af58 --- /dev/null +++ b/docs/v1.1.0/spec/method-versioning.md @@ -0,0 +1,26 @@ +## Method Versioning + +It is RECOMMENDED that Sidetree based DID Methods implement the following versioning structures to support development, testing, staging and production network deployments. + +We define a network suffix as follows for a given DID Method: + +`did:<method>:<network>:<didUniqueSuffix>` + +If no network suffix is provided, it is assumed that the "mainnet" or "production" network is to be used... for example, these DIDs should resolve to the same DID state: + +``` +did:elem:mainnet:EiD0x0JeWXQbVIpBpyeyF5FDdZN1U7enAfHnd13Qk_CYpQ +did:elem:EiD0x0JeWXQbVIpBpyeyF5FDdZN1U7enAfHnd13Qk_CYpQ +``` + +An ION DID on the Bitcoin Testnet3 testnet is defined as follows: + +`did:ion:testnet3:EiD0x0JeWXQbVIpBpyeyF5FDdZN1U7enAfHnd13Qk_CYpQ` + +An ELEM DID on the Ethereum Ropsten testnet is defined as follows: + +`did:elem:ropsten:EiD0x0JeWXQbVIpBpyeyF5FDdZN1U7enAfHnd13Qk_CYpQ` + +:::warning Anchoring System Forking +Implementers should be aware that if the underlying decentralized anchoring system were to fork, the identifiers will also be forked. In this case, a new identifier must be created either through an indication at the network layer or with a completely new method name to identify the decentralized identifiers of the forked network. +::: \ No newline at end of file diff --git a/docs/v1.1.0/spec/operations.md b/docs/v1.1.0/spec/operations.md new file mode 100644 index 000000000..e64631dd8 --- /dev/null +++ b/docs/v1.1.0/spec/operations.md @@ -0,0 +1,143 @@ + + + +## DID Operations + +Sidetree-based DIDs support a variety of DID operations, all of which require the DID owner to generate specific data values and cryptographic material. 
The sections below describe how to perform each type of operation, and how those operations are represented in the CAS-replicated files that are anchored to the underlying anchoring system. + +While virtually all DID owners will engage User Agent applications on their local devices to perform these operations, most will not generate the anchoring transactions on the underlying anchoring system. Instead, most users will likely send the anchoring-related operation values they generate to external nodes for anchoring. This is relatively safe, because operations require signatures that an external node cannot forge. The only attack available to a rogue node operator is to not anchor the operations a DID owner sends them. However, the DID owner can detect this (via a scan of subsequent blocks) and send their operation to a different node or do it themselves, if they so desire. + +It is strongly advised that DID owners and User Agents (e.g. wallet apps) retain their DID operations and operation-anchoring files. Doing so is helpful in cases where users, or their User Agent, need to quickly access the operations and operation-anchoring files, or a user wishes to individually persist their operation and operation-anchoring files on the CAS network for even greater independent availability assurance. + +::: note + This specification does not define an API for sending public DID operation values to third-party Sidetree nodes for external anchoring, as that is an elective activity that has no bearing on the technical workings of the protocol, its capabilities, or its security guarantees. +::: + +::: warning + Operations other than Create contain a compact JWS. Dereferencing of key material used to verify the JWS is a DID Method specific concern. Some methods may rely on the DID Document data model, others may rely on an internal data model. 
Some methods may rely on `kid` of the form `did:example:123#fingerprint`, others may not include a `kid` in the JWS, or its value may be arbitrary. Support for specific `alg` fields is also DID Method specific. Implementers are cautioned to choose support for specific `alg` values carefully. +::: + +### Create + +Use the following process to generate a Sidetree-based DID: + +1. Generate a key pair using the defined [`KEY_ALGORITHM`](#key-algorithm), let this be known as the [Update Key Pair](#update-key-pair). +2. Generate a [public key commitment](#public-key-commitment) using the defined [public key commitment scheme](#public-key-commitment-scheme) and public key of the generated [Update Key Pair](#update-key-pair), let this resulting commitment be known as the [update commitment](#update-commitment). +3. Generate a canonicalized representation of the following object using the implementation's [`JSON_CANONICALIZATION_SCHEME`](#json-canonicalization-scheme), herein referred to as the [_Create Operation Delta Object_](#create-delta-object){ id="create-delta-object" }: + ```json + { + "patches": [ PATCH_1, PATCH_2, ... ], + "updateCommitment": COMMITMENT_HASH + } + ``` + - The object ****MUST**** contain a `patches` property, and its value ****MUST**** be a JSON array of [DID State Patches](#did-state-patches). + - The object ****MUST**** contain an `updateCommitment` property, and its value ****MUST**** be the [update commitment](#update-commitment) as generated in step 2. +4. Generate a key pair using the defined [`KEY_ALGORITHM`](#key-algorithm), let this be known as the [recovery key pair](#recovery-key-pair), where the public key of this pair is used for generating the [recovery commitment](#recovery-commitment), and the private key for use in the next [recovery](#recovery) operation. +5. 
Generate a [public key commitment](#public-key-commitment) using the defined [public key commitment scheme](#public-key-commitment-scheme) and public key of the generated [recovery key pair](#recovery-key-pair), let this resulting commitment be known as the [recovery commitment](#recovery-commitment). +6. Generate a canonicalized representation of the following object using the implementation's [`JSON_CANONICALIZATION_SCHEME`](#json-canonicalization-scheme), herein referred to as the [_Create Operation Suffix Data Object_](#create-suffix-data-object){ id="create-suffix-data-object" }: + ```json + { + "type": TYPE_STRING, + "deltaHash": DELTA_HASH, + "recoveryCommitment": COMMITMENT_HASH, + "anchorOrigin": ANCHOR_ORIGIN + } + ``` + - The object ****MAY**** contain a `type` property, and if present, its value ****MUST**** be a type string, of a length and composition defined by the implementation, that signifies the type of entity a DID represents. + - The object ****MUST**** contain a `deltaHash` property, and its value ****MUST**** be a hash of the canonicalized [_Create Operation Delta Object_](#create-delta-object) (detailed above), generated via the [`HASH_PROTOCOL`](#hash-protocol). + - The object ****MUST**** contain a `recoveryCommitment` property, and its value ****MUST**** be the [recovery commitment](#recovery-commitment) as generated in step 5. + - The object ****MAY**** contain an `anchorOrigin` property if an implemention defines this property. This property signifies the implementer-defined system(s) that know the most recent anchor for this DID. The property's type and composition is defined by the implementation. Implementers ****MAY**** define this property since implementers with a single common anchoring system do not need to support this property. 
+ +::: note +Implementations ****MAY**** choose to define additional properties for inclusion in the [_Create Operation Suffix Data Object_](#create-suffix-data-object), but the presence of any properties beyond the standard properties or implementation-defined properties ****ARE NOT**** permitted. +::: + +::: warning +The string values used in the type field must be carefully considered, and this specification strongly cautions implementers to avoid allowing any values that represent humans, groups of humans, or any human-identifying classifications. +::: + +### Update + +The following process must be used to update the state of a Sidetree-based DID: + +1. Retrieve the _Update Reveal Value_ that matches the previously anchored _Update Commitment_. +2. Generate a canonicalized representation of the following object using the implementation's [`JSON_CANONICALIZATION_SCHEME`](#json-canonicalization-scheme), herein referred to as the [_Update Operation Delta Object_](#update-delta-object){ id="update-delta-object" }, composed as follows: + ```json + { + "patches": [ PATCH_1, PATCH_2, ... ], + "updateCommitment": COMMITMENT_HASH + } + ``` + - The object ****MUST**** contain a `patches` property, and its value ****MUST**** be an array of [DID State Patches](#did-state-patches). + - The object ****MUST**** contain an `updateCommitment` property, and its value ****MUST**** be a new _Update Commitment_, the value of which will be revealed for the next Update operation. +3. 
Generate an [IETF RFC 7515](https://tools.ietf.org/html/rfc7515) compliant compact JWS representation of the following object, herein referred to as the [_Update Operation Signed Data Object_](#update-signed-data-object){ id="update-signed-data-object" }, with a signature that validates against a currently active update key, and contains the following payload values: + ```json + { + "protected": {...}, + "payload": { + "updateKey": JWK_OBJECT, + "deltaHash": DELTA_HASH + }, + "signature": SIGNATURE_STRING + } + ``` + - The JWS `payload` object ****MUST**** include a `updateKey` property, and its value ****MUST**** be the [IETF RFC 7517](https://tools.ietf.org/html/rfc7517) compliant JWK representation matching the previous _Update Commitment_. + - The JWS `payload` object ****MUST**** contain a `deltaHash` property, and its value ****MUST**** be a hash of the canonicalized [_Update Operation Delta Object_](#update-delta-object), generated via the [`HASH_PROTOCOL`](#hash-protocol), with a maximum length as specified by the [`MAX_OPERATION_HASH_LENGTH`](#max-operation-hash-length). + +### Recover + +Use the following process to recover a Sidetree-based DID: + +1. Retrieve the _Recovery Key_ that matches the previously anchored _Recovery Commitment_. This value will be used in constructing an [_Core Index File Recovery Entry_](#core-index-file-recovery-entry) for the DID being recovered. +2. Generate a new [recovery key pair](#recovery-key-pair), which ****MUST NOT**** be the same key used in any previous operations, via the [`KEY_ALGORITHM`](#key-algorithm), retaining the _Next Recovery Public Key_ for use in generating the next _Recovery Commitment_, and the private key for use in the next [Recovery](#recover) operation. +3. 
Create a _Recovery Commitment_ using the [Hashing Process](#hashing-process) to generate a hash value from the canonicalized [IETF RFC 7517](https://tools.ietf.org/html/rfc7517) JWK representation (using the implementation's [`JSON_CANONICALIZATION_SCHEME`](#json-canonicalization-scheme)) of the _Next Recovery Public Key_, and retain the hash value for inclusion in an [Core Index File](#core-index-file). +4. Generate a new [Update Key Pair](#update-key-pair), which ****SHOULD NOT**** be the same key used in any previous operations, via the [`KEY_ALGORITHM`](#key-algorithm), retaining the _Next Update Public Key_ for use in generating the next _Update Commitment_, and the private key for use in the next [Update](#update) operation. +5. Create an _Update Commitment_ using the [Hashing Process](#hashing-process) to generate a hash value from the canonicalized [IETF RFC 7517](https://tools.ietf.org/html/rfc7517) JWK representation (using the implementation's [`JSON_CANONICALIZATION_SCHEME`](#json-canonicalization-scheme)) of the _Next Update Public Key_, and retain the hash value for inclusion in the [_Recovery Operation Delta Object_](#recover-delta-object) (as described below). +6. Generate and retain a [`COMMITMENT_VALUE`](#commitment-value), in adherence with the [Commitment Schemes](#commitment-schemes) directives, for use in the next Update operation, herein referred to as the _Update Reveal Value_. +7. Generate an _Update Commitment_ using the [Hashing Process](#hashing-process), in adherence with the [Commitment Schemes](#commitment-schemes) directives, to generate a hash of the _Update Reveal Value_, and retain the resulting hash value for inclusion in an [Core Index File](#core-index-file). +8. 
Generate a canonicalized representation of the following object using the implementation's [`JSON_CANONICALIZATION_SCHEME`](#json-canonicalization-scheme), herein referred to as the [_Recovery Operation Delta Object_](#recover-delta-object){ id="recover-delta-object" }, composed as follows: + ```json + { + "patches": [ PATCH_1, PATCH_2, ... ], + "updateCommitment": COMMITMENT_HASH + } + ``` + - The object ****MUST**** contain a `patches` property, and its value ****MUST**** be an array of [DID State Patches](#did-state-patches). + - The object ****MUST**** contain a `updateCommitment` property, and its value ****MUST**** be the _Update Commitment_, as described above. +9. Generate an [IETF RFC 7515](https://tools.ietf.org/html/rfc7515) compliant compact JWS representation of the following object, herein referred to as the [_Recovery Operation Signed Data Object_](#recovery-signed-data-object){ id="recovery-signed-data-object" }, with a signature that validates against a currently active recovery key, and contains the following `payload` values: + ```json + { + "protected": {...}, + "payload": { + "recoveryCommitment": COMMITMENT_HASH, + "recoveryKey": JWK_OBJECT, + "deltaHash": DELTA_HASH, + "anchorOrigin": ANCHOR_ORIGIN + }, + "signature": SIGNATURE_STRING + } + ``` + - The JWS `payload` object ****MUST**** contain a `recoveryCommitment` property, and its value ****MUST**** be the next [_Recovery Commitment_](#recovery-commitment), as described above, with a maximum length as specified by the [`MAX_OPERATION_HASH_LENGTH`](#max-operation-hash-length). + - The JWS `payload` object ****MUST**** include a `recoveryKey` property, and its value ****MUST**** be the [IETF RFC 7517](https://tools.ietf.org/html/rfc7517) JWK representation matching the previous _Recovery Commitment_. 
+ - The JWS `payload` object ****MUST**** contain a `deltaHash` property, and its value ****MUST**** be a hash of the canonicalized [_Recovery Operation Delta Object_](#recover-delta-object), generated via the [`HASH_PROTOCOL`](#hash-protocol), with a maximum length as specified by the [`MAX_OPERATION_HASH_LENGTH`](#max-operation-hash-length). + - The JWS `payload` object ****MAY**** contain an `anchorOrigin` property if an implemention defines this property. This property signifies the implementer-defined system(s) that know the most recent anchor for this DID. The property's type and composition is defined by the implementation. Implementers ****MAY**** define this property since implementers with a single common anchoring system do not need to support this property. + +### Deactivate + +The following process must be used to deactivate a Sidetree-based DID: + +1. Retrieve the _Recovery Reveal Value_ that matches the previously anchored _Recovery Commitment_. +2. Generate a [IETF RFC 7515](https://tools.ietf.org/html/rfc7515) compliant compact JWS object, herein referred to as the [_Deactivate Operation Signed Data Object_](#deactivate-signed-data-object){ id="deactivate-signed-data-object" }, with a signature that validates against the currently active recovery key, and contains the following `payload` values: + ```json + { + "protected": {...}, + "payload": { + "didSuffix": SUFFIX_STRING, + "recoveryKey": JWK_OBJECT + }, + "signature": SIGNATURE_STRING + } + ``` + - The JWS `payload` object ****MUST**** contain a `didSuffix` property, and its value ****MUST**** be the [DID Suffix](#did-suffix) of the DID the operation pertains to, with a maximum length as specified by the [`MAX_OPERATION_HASH_LENGTH`](#max-operation-hash-length). + - The JWS `payload` object ****MUST**** include a `recoveryKey` property, and its value ****MUST**** be the [IETF RFC 7517](https://tools.ietf.org/html/rfc7517) JWK representation matching the previous _Recovery Commitment_. 
+ diff --git a/docs/v1.1.0/spec/parameters.md b/docs/v1.1.0/spec/parameters.md new file mode 100644 index 000000000..65c3fc7e8 --- /dev/null +++ b/docs/v1.1.0/spec/parameters.md @@ -0,0 +1,27 @@ +## Default Parameters + +Each version of the protocol will define a set of protocol rules and parameters with default suggested values. The following are the parameters used by this version of the Sidetree protocol - implementers ****MAY**** choose different values than the defaults listed below: + +| Protocol Parameter | Description | Suggested Defaults | +|-----------------------------|-------------------------------------------------------------------------------|:-------------------| +| `HASH_ALGORITHM` { #hash-algorithm } | Algorithm for generating hashes of protocol-related values. | SHA256 | +| `HASH_PROTOCOL` { #hash-protocol } | Protocol for generating hash representations in Sidetree implementations, using the [`HASH_ALGORITHM`](#hash-algorithm) | [Multihash](https://multiformats.io/multihash/) | +| `DATA_ENCODING_SCHEME` { #data-encoding-scheme } | Encoding selected for various data (JSON, hashes, etc.) used within an implementation, the output of which ****MUST**** be in ASCII format. | Base64URL | +| `JSON_CANONICALIZATION_SCHEME` { #json-canonicalization-scheme } | The scheme selected for canonicalizing JSON structures used throughout the specification. | [JCS](https://tools.ietf.org/html/draft-rundgren-json-canonicalization-scheme-17) | +| `KEY_ALGORITHM` { #key-algorithm } | Asymmetric public key algorithm for signing DID operations. Must be a valid JWK `crv`. | secp256k1 | +| `SIGNATURE_ALGORITHM` { #sig-algorithm } | Asymmetric public key signature algorithm. Must be a valid JWS `alg`. | ES256K | +| `CAS_PROTOCOL` { #cas-protocol } | The CAS network protocol used within an implementation. 
| [IPFS](https://github.com/ipfs/specs) | +| `CAS_URI_ALGORITHM` { #cas-uri-algorithm } | Algorithm for generating unique content-bound identifiers for the implementation-selected CAS protocol. | IPFS CID | +| `COMPRESSION_ALGORITHM` { #compression-algorithm } | File compression algorithm. | [GZIP](https://tools.ietf.org/html/rfc1952) | +| `REVEAL_VALUE` { #reveal-value } | Cryptographic hash of the commitment value. | SHA256 Multihash (0x12) | +| `GENESIS_TIME` { #genesis-time } | The point in the target anchoring system's transaction history at which Sidetree implementation is first activated (e.g. block number in a blockchain). | 630000 | +| `MAX_CORE_INDEX_FILE_SIZE` { #max-core-index-file-size } | Maximum compressed [Core Index File](#core-index-file) size. | 1 MB (zipped) | +| `MAX_PROVISIONAL_INDEX_FILE_SIZE` { #max-provisional-index-file-size } | Maximum compressed Provisional Index File size.| 1 MB (zipped) | +| `MAX_PROOF_FILE_SIZE` { #max-proof-file-size } | Maximum compressed Proof File size. | 2.5 MB (zipped) | +| `MAX_CHUNK_FILE_SIZE` { #max-chunk-file-size } | Maximum compressed chunk file size. | 10 MB | +| `MAX_MEMORY_DECOMPRESSION_FACTOR` { #max-memory-decompression-factor } | Maximum size after decompression. | 3x file size | +| `MAX_CAS_URI_LENGTH` { #max-cas-uri-length } | Maximum length of CAS URIs. | 100 bytes | +| `MAX_DELTA_SIZE` { #max-delta-size } | Maximum canonicalized operation delta buffer size. | 1,000 bytes | +| `MAX_OPERATION_COUNT` | Maximum number of operations per batch. | 10,000 ops | +| `MAX_OPERATION_HASH_LENGTH` { #max-operation-hash-length } | Maximum length of all hashes in CAS URI files. | 100 bytes | +| `NONCE_SIZE` {#nonce-size} | The number of bytes (octets) in nonce values. 
| 16 bytes | diff --git a/docs/v1.1.0/spec/patches.md b/docs/v1.1.0/spec/patches.md new file mode 100644 index 000000000..dfa1edfbe --- /dev/null +++ b/docs/v1.1.0/spec/patches.md @@ -0,0 +1,288 @@ + + +## DID State Patches + +Sidetree defines a delta-based [Conflict-Free Replicated Data Type](https://en.wikipedia.org/wiki/Conflict-free_replicated_data_type) system, wherein the metadata in a Sidetree-based implementation is controlled by the cryptographic PKI material of individual entities in the system, represented by DIDs. While the most common form of state associated with the DIDs in a Sidetree-based implementation is a [DID Document](https://w3c.github.io/did-core/), Sidetree can be used to maintain any type of DID-associated state. + +Sidetree specifies a general format for patching the state associated with a DID, called _Patch Actions_, which define how to deterministically mutate a DID's associated state. Sidetree further specifies a standard set of _Patch Actions_ (below) implementers MAY use to facilitate DID state patching within their implementations. Support of the standard set of _Patch Actions_ defined herein IS NOT required, but implementers ****MUST**** use the _Patch Action_ format for defining patch mechanisms within their implementation. The general _Patch Action_ format is defined as follows: + +```json +{ + "action": "add-public-keys", + ... +} + +{ + "action": "-custom-action", + ... +} +``` + +1. _Patch Actions_ ****MUST**** be represented as JSON objects. +2. _Patch Action_ objects ****MUST**** include an `action` property, and its value ****SHOULD**** be one of the standard _Patch Action_ types listed below, or, if the implementer chooses to create a custom _Patch Action_, a kebab-case string (dash-delimited lowercase words) with a leading dash, to indicate a custom _Patch Action_, for example: `-custom-action`. 
+ - `add-public-keys` + - `remove-public-keys` + - `add-services` + - `remove-services` + - `ietf-json-patch` + +### Standard Patch Actions + +The following set of standard _Patch Actions_ are specified to help align on a common set of _Patch Actions_ that provide a predictable usage pattern across Sidetree-based DID Method implementations. + +#### `add-public-keys` + +The `add-public-keys` _Patch Action_ describes the addition of cryptographic keys associated with a given DID. For any part of an `add-public-keys` _Patch Action_ to be applied to the DID's state, all specified conditions ****MUST**** be met for all properties and values, else the patch ****MUST**** be discarded in its entirety. In the case a public key entry already exists for the given `id` specified within an `add-public-keys` _Patch Action_, the implementation ****MUST**** overwrite the existing entry entirely with the incoming patch. To construct an `add-public-keys` patch, compose an object as follows: + +1. The object ****MUST**** include an `action` property, and its value ****MUST**** be `add-public-keys`. +2. The object ****MUST**** include a `publicKeys` property, and its value ****MUST**** be an array. +3. Each key being added ****MUST**** be represented by an entry in the `publicKeys` array, and each entry must be an object composed as follows: + 1. The object ****MUST**** include an `id` property, and its value ****MUST**** be a string with no more than fifty (50) Base64URL encoded characters. If the value is not of the correct type or exceeds the specified maximum length, the entire _Patch Action_ ****MUST**** be discarded, without any of the patch being used to modify the DID's state. + 2. The object ****MUST**** include a `type` property, and its value ****MUST**** be a string and ****SHOULD**** be of a registered [Cryptographic Suite](https://w3c-ccg.github.io/ld-cryptosuite-registry/). + 3. 
The object ****MAY**** include a `controller` property, and its value ****MUST**** be a DID URI string. Implementations ****MAY**** specify a maximum length for the value, and if specified, the value ****MUST NOT**** exceed it. If the `controller` property is absent, the implementation ****must**** set the corresponding property in the resolved DID Document with a value that equates to the DID Document controller's id. If the value is not of the correct type or exceeds the specified maximum length, the entire _Patch Action_ ****MUST**** be discarded, without any of the patch being used to modify the DID's state. + 4. The object ****MUST**** include either a `publicKeyJwk` or a `publicKeyMultibase` property with values as defined by [DID Core](https://w3c.github.io/did-core) and [DID Specification Registries](https://w3c.github.io/did-spec-registries). Implementers ****MAY**** choose to only define `publicKeyJwk`. These key representations are described in the JWK and Multibase subsections. Implementations ****MAY**** specify a maximum length for these values, and if specified, the values ****MUST NOT**** exceed it. If more or less than one of these properties is present, the value of the included property is not of the correct type, or the value exceeds the implementer's specified maximum length, the entire _Patch Action_ ****MUST**** be discarded, without any of the patch being used to modify the DID's state. + 5. The object ****MAY**** include a `purposes` property, and if included, its value ****MUST**** be an array of one or more strings. The value for each string ****SHOULD**** represent a verification relationship defined by [DID Core](https://w3c.github.io/did-core) or the [DID Specification Registries](https://w3c.github.io/did-spec-registries). 
If the value is not of the correct type or contains any string not listed below (or defined by the implementer), the entire _Patch Action_ ****MUST**** be discarded, without any of it being used to modify the DID's state. + - **`authentication`**: a reference to the key's `id` ****MUST**** be included in the `authentication` array of the resolved _DID Document_. + - **`keyAgreement`**: a reference to the key's `id` ****MUST**** be included in the `keyAgreement` array of the resolved _DID Document_. + - **`assertionMethod`**: a reference to the key's `id` ****MUST**** be included in the `assertionMethod` array of the resolved _DID Document_. + - **`capabilityDelegation`**: a reference to the key's `id` ****MUST**** be included in the `capabilityDelegation` array of the resolved _DID Document_. + - **`capabilityInvocation`**: a reference to the key's `id` ****MUST**** be included in the `capabilityInvocation` array of the resolved _DID Document_. + +::: note +An implementer may support transformations from `publicKeyJwk` or `publicKeyMultibase` to other representations required by a particular Cryptographic Suite. +For example, an implementer may support projecting `publicKeyBase58` into the resolution result for the `Ed25519VerificationKey2018` suite. +::: + +##### JWK + +::: example +```json +{ + "action": "add-public-keys", + "publicKeys": [ + { + "id": "key1", + "purposes": ["authentication"], + "type": "EcdsaSecp256k1VerificationKey2019", + "publicKeyJwk": {...} + } + ] +} +``` +::: + +When the object contains a `publicKeyJwk`, the public key patch is using a JWK representation. The value of `publicKeyJwk` ****MUST**** be a public key expressed as a [IETF RFC 7517](https://tools.ietf.org/html/rfc7517) compliant JWK representation for a [`KEY_ALGORITHM`](#key-algorithm) supported by the implementation. The key represented by the JWK object ****MUST**** be projected into the `verificationMethod` array of the DID Document upon resolution. 
If the value is not a compliant JWK representation, the entire _Patch Action_ ****MUST**** be discarded, without any of it being used to modify the DID's state. + +##### Multibase + +::: example +```json +{ + "action": "add-public-keys", + "publicKeys": [ + { + "id": "key1", + "purposes": ["authentication"], + "type": "Ed25519VerificationKey2020", + "publicKeyMultibase": "zgo4sNiXwJTbeJDWZLXVn9uTnRwgFHFxcgDePvEC9TiTYgRpG7q1p5s7yRAic" + } + ] +} +``` +::: + +An implementer ****MAY**** define support for `publicKeyMultibase` in addition to supporting `publicKeyJwk`. + +When the object contains a `publicKeyMultibase`, the public key patch is using a multibase representation. The key represented by the multibase encoding ****MUST**** be projected into the `verificationMethod` array of the DID Document upon resolution. + +#### `remove-public-keys` + +::: example +```json +{ + "action": "remove-public-keys", + "ids": ["key1", "key2"] +} +``` +::: + +The `remove-public-keys` _Patch Action_ describes the removal of cryptographic keys associated with a given DID. For any part of an `remove-public-keys` _Patch Action_ to be applied to the DID's state, all specified conditions ****MUST**** be met for all properties and values, else the patch ****MUST**** be discarded in its entirety. In the case there exists no public key entry for an `id` specified within a `remove-public-keys` _Patch Action_, the implementation ****SHALL**** perform no action and treat application of the delete operation as a success. To construct a `remove-public-keys` _Patch Action_, compose an object as follows: + +1. The object ****MUST**** include an `action` property, and its value ****MUST**** be `remove-public-keys`. +2. The object ****MUST**** include a `ids` property, and its value ****MUST**** be an array of key IDs that correspond with keys presently associated with the DID that are to be removed. 
If the value is not of the correct type or includes a string value that is not associated with a key in the document, the entire _Patch Action_ ****MUST**** be discarded, without any of it being used to modify the DID's state. + +#### `add-services` + +::: example +```json +{ + "action": "add-services", + "services": [ + { + "id": "sds", + "type": "SecureDataStore", + "serviceEndpoint": "http://hub.my-personal-server.com" + }, + { + "id": "did-config", + "type": "LinkedDomains", + "serviceEndpoint": { + "origins": ["https://foo.com", "https://bar.com"] + } + } + ] +} +``` +::: + +The `add-services` _Patch Action_ describes the addition of [Service Endpoints](https://w3c.github.io/did-core/#service-endpoints) to a DID's state. For any part of an `add-services` _Patch Action_ to be applied to the DID's state, all specified conditions ****MUST**** be met for all properties and values, else the patch ****MUST**** be discarded in its entirety. In the case a service entry already exists for the given `id` specified within an `add-services` _Patch Action_, the implementation ****MUST**** overwrite the existing entry entirely with the incoming patch. To construct an `add-services` patch, compose an object as follows: + +1. The object ****MUST**** include an `action` property, and its value ****MUST**** be `add-services`. +2. The object ****MUST**** include a `services` property, and its value ****MUST**** be an array. If the value is not of the correct type, the entire _Patch Action_ ****MUST**** be discarded, without any of it being used to modify the DID's state. +3. Each service being added ****MUST**** be represented by an entry in the `services` array, and each entry must be an object composed as follows: + 1. The object ****MUST**** include an `id` property, and its value ****MUST**** be a string with a length of no more than fifty (50) Base64URL encoded characters. 
If the value is not of the correct type or exceeds the specified length, the entire _Patch Action_ ****MUST**** be discarded, without any of it being used to modify the DID's state. + 2. The object ****MUST**** include a `type` property, and its value ****MUST**** be a string with a length of no more than thirty (30) Base64URL encoded characters. If the value is not a string or exceeds the specified length, the entire _Patch Action_ ****MUST**** be discarded, without any of it being used to modify the DID's state. + 3. The object ****MUST**** include a `serviceEndpoint` property, and its value ****MUST**** be either a valid URI string (including a scheme segment: i.e. http://, git://) or a JSON object with properties that describe the Service Endpoint further. If the values do not adhere to these constraints, the entire _Patch Action_ ****MUST**** be discarded, without any of it being used to modify the DID's state. + + +#### `remove-services` + +::: example +```json +{ + "action": "remove-services", + "ids": ["sds1", "sds2"] +} +``` +::: + +The `remove-services` _Patch Action_ describes the removal of Service Endpoints associated with a given DID. For any part of a `remove-services` _Patch Action_ to be applied to the DID's state, all specified conditions ****MUST**** be met for all properties and values, else the patch ****MUST**** be discarded in its entirety. In the case there exists no service entry for an `id` specified within a `remove-services` _Patch Action_, the implementation ****SHALL**** perform no action and treat application of the delete operation as a success. To construct a `remove-services` _Patch Action_, compose an object as follows: + +1. The object ****MUST**** include an `action` property, and its value ****MUST**** be `remove-services`. +2. 
The object ****MUST**** include a `ids` property, and its value ****MUST**** be an array of Service Endpoint IDs that correspond with Service Endpoints presently associated with the DID that are to be removed. + +#### `replace` + +::: example +```json +{ + "action": "replace", + "document": { + "publicKeys": [ + { + "id": "key2", + "purposes": ["authentication"], + "type": "EcdsaSecp256k1VerificationKey2019", + "publicKeyJwk": {...} + } + ], + "services": [ + { + "id": "sds3", + "type": "SecureDataStore", + "serviceEndpoint": "http://hub.my-personal-server.com" + } + ] + } +} +``` +::: + +The `replace` _Patch Action_ acts as a total state reset that replaces a DID's current PKI metadata state with the state provided. The `replace` _Patch Action_ enables the declaration of public keys and service endpoints using the same schema formats as the `add-public-keys` and `add-services` _Patch Actions_. To construct a `replace` patch, compose an object as follows: + +1. The object ****MUST**** include an `action` property, and its value ****MUST**** be `replace`. +2. The object ****MUST**** include a `document` property, and its value ****MUST**** be an object, which may contain the following properties: + - The object ****MAY**** include a `publicKeys` property, and if present, its value ****MUST**** be an array of public key entries that follow the same schema and requirements as the public key entries from the [`add-public-keys`](#add-public-keys) _Patch Action_ + - The object ****MAY**** include a `services` property, and if present, its value ****MUST**** be an array of service endpoint entries that follow the same schema and requirements as the service endpoint entries from the [`add-services`](#add-services) _Patch Action_. + +#### `ietf-json-patch` + +The `ietf-json-patch` Patch Action describes a mechanism for modifying a DID's state using [IETF JSON Patch](https://tools.ietf.org/html/rfc6902). 
To construct an `ietf-json-patch` _Patch Action_, compose an object as follows: + +1. The object ****MUST**** include an `action` property, and its value ****MUST**** be `ietf-json-patch`. +2. The object ****MUST**** include a `patches` property, and its value ****MUST**** be an array of [IETF JSON Patch](https://tools.ietf.org/html/rfc6902) operation objects. + +If `ietf-json-patch` is used to add or remove from a proof purpose collection, such as `operations`, `recovery` or `assertionMethod`, per the DID Core spec, each collection element MUST have a unique `id` property, or be a unique string identifier. + +See [Operation Verification](https://identity.foundation/sidetree/spec/#operation-verification) for more details on how operations are verified. + +::: example 1 +```json +{ + "action": "ietf-json-patch", + "patches": [ + { "op": "add", ... }, + { "op": "remove", ... }, + { "op": "replace", ... }, + { "op": "move", ... }, + { "op": "copy", ... } + ] +} +``` +::: + +::: example 2 +```json +{ + "action": "ietf-json-patch", + "patches": [ + { + "op": "replace", + "path": "/service", + "value": [ + { + "id": "did:example:123#edv", + "type": "EncryptedDataVault", + "serviceEndpoint": "https://edv.example.com/" + } + ] + } + ] +} +``` +::: + +::: warning +Without careful validation, use of `ietf-json-patch` may result in unrecoverable states, similar to "Deactivated". +::: + +::: warning +Use of `ietf-json-patch` may harm an implementation's ability to perform validation on operations at ingestion time, which could impact performance negatively. +::: + +#### `add-also-known-as` + +::: example +```json +{ + "action": "add-also-known-as", + "uris": [ + "did:example:1234" + ] +} +``` +::: + +The `add-also-known-as` _Patch Action_ describes the addition of [Also Known As](https://www.w3.org/TR/did-core/#also-known-as) to a DID's state. 
For any part of an `add-also-known-as` _Patch Action_ to be applied to the DID's state, all specified conditions ****MUST**** be met for all properties and values, else the patch ****MUST**** be discarded in its entirety. To construct an `add-also-known-as` patch, compose an object as follows: + +1. The object ****MUST**** include an `action` property, and its value ****MUST**** be `add-also-known-as`. +2. The object ****MUST**** include a `uris` property, and its value ****MUST**** be an array. Each value of the array ***MUST*** be a URI. If the value is not of the correct type, the entire _Patch Action_ ****MUST**** be discarded, without any of it being used to modify the DID's state. + + +#### `remove-also-known-as` + +::: example +```json +{ + "action": "remove-also-known-as", + "uris": [ + "did:example:1234" + ] +} +``` +::: + +The `remove-also-known-as` _Patch Action_ describes the removal of [Also Known As](https://www.w3.org/TR/did-core/#also-known-as) from a DID's state. For any part of an `remove-also-known-as` _Patch Action_ to be applied to the DID's state, all specified conditions ****MUST**** be met for all properties and values, else the patch ****MUST**** be discarded in its entirety. To construct a `remove-also-known-as` _Patch Action_, compose an object as follows: + +1. The object ****MUST**** include an `action` property, and its value ****MUST**** be `remove-also-known-as`. +2. The object ****MUST**** include a `uris` property, and its value ****MUST**** be an array of URIs that correspond with `Also Known As` URIs presently associated with the DID that are to be removed. 
diff --git a/docs/v1.1.0/spec/pof.md b/docs/v1.1.0/spec/pof.md new file mode 100644 index 000000000..7febcf234 --- /dev/null +++ b/docs/v1.1.0/spec/pof.md @@ -0,0 +1,33 @@ +## Proof of Fee + +::: note +This section is non-normative +::: + +Sidetree implementers ****MAY**** choose to implement protective mechanisms designed to strengthen a Sidetree network against low-cost spurious operations. These mechanisms are primarily designed for open, permissionless implementations utilizing public blockchains that feature native crypto-economic systems. + +### Base Fee Variable + +All of the mechanisms described in this section are based on the same underlying numeric value, known as the _Base Fee Variable_, that is calculated by processing a collection of native variables from the target anchoring system with a set of deterministic functions. The _Base Fee Variable_ is used in two primary ways: + +1. To set a minimum required native transaction fee that must be paid relative to the number of DID operations a writer seeks to anchor with the transaction +2. To establish a fee basis for any additional economic protections, such as a value locking mechanism wherein a writer must escrow or burn some amount of digital asset to have other nodes view their writes into the network as valid. + +To calculate the _Base Fee Variable_, every implementation will define a deterministic algorithm, which may be static or change dynamically via some form of logical calculation that is applied by all nodes in the system at some interval. + +### Per-Operation Fee + +An implementation may choose to require a per-operation fee, to ensure that the baseline fee paid by a writer on the anchoring system is not able to game unusually low-fee periods to flood the anchoring system with Sidetree-embedded transactions. The following logical process ****SHOULD**** be used to set and evaluate a per-operation fee for each Sidetree-bearing transaction that is observed: + +1. 
Determine the _Base Fee Variable_ for the current block or transaction interval being assessed. +2. Multiply the _Base Fee Variable_ by the [Operation Count](#anchor-string) integer from the [Anchor String](#anchor-string), producing the total batch operation fee. +3. Validate that the transaction anchored in the anchoring system has spent at least the sum of the total batch operation fee, as derived above. +4. If the transaction spent the required fee (or some amount greater), proceed with processing the anchored batch of DID operations. If the transaction failed to spend the required fee (or some amount greater), ignore the transaction as invalid. + +### Value Locking + +An implementation may choose to institute a value locking scheme wherein digital assets native to the underlying anchoring system are locked under some conditions set by the implementation that afford a locking entity access to greater write operation volumes and related capabilities. The basic principle of value locking is to require a form of escrow to gate consumption of resources in the network. In simple terms, with value locking in place, an implementation can require a writer who wants to write batches at the maximum size to first lock an amount of the native underlying anchoring system asset commensurate with the batch sizes they want to anchor. Implementations can create value locking mechanisms a number of ways, but the following is a general example of a value locking approach: + +1. Using the _Base Fee Variable_, assess a required locking amount that follows an implementation-defined cost curve that maps to the size of batches up to the maximum batch size. (If your implementation features recurring evaluation logic, this will be reevaluated for whatever block or transaction interval you define) +2. Using the underlying anchoring system's asset locking capabilities (e.g. 
a Bitcoin Timelock script), validate that all transactions observed within the current block or transaction interval are linked to a sum of locked value that meets or exceeds the required value locking amount. Each locked sum may only be linked to one batch per block or transaction interval, which means anchoring multiple batches that require locks requires multiple locks, compounding the sum that must be locked by a multi-batch writer. A link from a batch-embedded transaction to a lock is typically determined by proving control of a lock via some form of deterministic proof that ties the lock to the batch-embedded transaction (e.g. signing the batch-embedded transactions with keys that control the lock) +3. If a transaction is linked to a locked sum that has been unused by any other transactions from that lock controller during the block, proceed with ingesting the anchored batch and processing it per the directives in the [file and transaction processing](#transaction-operation-processing) section of this specification. \ No newline at end of file diff --git a/docs/v1.1.0/spec/processing.md b/docs/v1.1.0/spec/processing.md new file mode 100644 index 000000000..35fef81e9 --- /dev/null +++ b/docs/v1.1.0/spec/processing.md @@ -0,0 +1,119 @@ +## Transaction & Operation Processing + +### Transaction Anchoring + +Once an Core Index File, Provisional Index File, and associated Chunk Files have been assembled for a given set of operations, a reference to the [Core Index File](#core-index-file) must be embedded within the target anchoring system to enter the set of operations into the Sidetree implementation's global state. The following process: + +1. Generate a transaction for the underlying anchoring system +2. Generate and include the following value, herein referred to as the [_Anchor String_](#anchor-string){id="anchor-string"}, within the transaction: + 1. 
Generate a numerical string (`'732'`) that represents the total number of operations present in the [Core Index File](#core-index-file) and [Provisional Index File](#provisional-index-file), herein referred to as the _Operation Count_. + 2. Using the [`CAS_URI_ALGORITHM`](#cas-uri-algorithm), generate a CID for the Core Index File, herein referred to as the _Core Index File CAS URI_. + 3. Join the _Operation Count_ and _Core Index File CAS URI_ with a `.` as follows: + ```js + "10000" + "." + "QmWd5PH6vyRH5kMdzZRPBnf952dbR4av3Bd7B2wBqMaAcf" + ``` + 4. Embed the _Anchor String_ in the transaction such that it can be located and parsed by any party that traverses the history of the target anchoring system. +2. If the implementation implements a [per-op fee](#proof-of-fee), ensure the transaction includes the fee amount required for the number of operations being anchored. +3. Encode the transaction with any other data or values required for inclusion by the target anchoring system, and broadcast it. + +### CAS File Propagation + +To ensure other nodes of the implementation can retrieve the [operation files](#file-structures) required to ingest the included operations and update the states of the DIDs it contains, the implementer must ensure that the files associated with a given set of operations being anchored are available to peers seeking to request and replicate them across the CAS storage layer. Use the following procedure for propagating transaction-anchored CAS files: + +1. If the underlying anchoring system is subject to an anchoring inclusion delay (e.g. the interval between blocks in a blockchain), implementers ****SHOULD**** wait until they receive a confirmation of inclusion (whatever that means for the target anchoring system) before exposing/propagating the [operation files](#file-structures) across the CAS network. (more about the reason for this in the note below) +2. 
After confirmation is received, implementers ****SHOULD**** use the most effective means of proactive propagation that the [`CAS_PROTOCOL`](#cas-protocol) supports. +3. A Sidetree-based implementation node that anchors operations should not assume other nodes on the CAS network will indefinitely retain and propagate the [files](#file-structures) for a given set of operations they anchor. A node ****SHOULD**** retain and propagate any files related to the operations it anchors. + +:::note CAS propagation delay +Most anchoring systems feature some delay between the broadcast of a transaction and the recorded inclusion of the transaction in the anchoring system's history. Because operation data included in the CAS files contains revealed commitment values for operations, propagating those files before confirmation of transaction inclusion exposes revealed commitment values to external entities who may download them prior to inclusion in the anchoring system. This means an attacker who learns of the revealed commitment value can craft invalid transactions that could be included before the legitimate operation the user is attempting to anchor. While this has no effect on proof-of-control security for a DID, an observing node would have to check the signatures of fraudulent transactions before the legitimate transaction is found, which could result in slower resolution processing for the target DID. +::: + +### Transaction Processing + +Regardless of the anchoring system an implementer chooses, the implementer ****MUST**** be able to sequence Sidetree-specific transactions within it in a deterministic order, such that any observer can derive the same order if the same logic is applied. The implementer MUST, either at the native transaction level or by some means of logical evaluation, assign Sidetree-specific transactions a [_Transaction Number_](#transaction-number). 
[_Transaction Numbers_](#transaction-number) ****MUST**** be assigned to all Sidetree-specific transactions present in the underlying anchoring system after [`GENESIS_TIME`](#genesis-time), regardless of whether or not they are valid. + +1. An implementer ****MUST**** develop implementation-specific logic that enables deterministic ordering and iteration of all protocol-related transactions in the underlying anchoring system, such that all operators of the implementation process them in the same order. +2. Starting at [`GENESIS_TIME`](#genesis-time), begin iterating transactions using the implementation-specific logic. +3. For each transaction found during iteration that is determined to be a protocol-related transaction, process the transaction as follows: + 1. Assign the transaction a _Transaction Number_. + 2. If the implementation supports enforcement value locking, and the transaction is encoded in accordance with the implementation's value locking format, skip the remaining steps and process the transaction as described in the [Proof of Fee](#proof-of-fee) section on [Value Locking](#value-locking). + 3. The [_Anchor String_](#anchor-string) ****MUST**** be formatted correctly - if it is not, discard the transaction and continue iteration. + 4. If the implementation DOES NOT support enforcement of a [per-operation fee](#proof-of-fee), skip this step. If enforcement of a [per-operation fee](#proof-of-fee) is supported, ensure the transaction fee meets the [per-operation fee](#proof-of-fee) requirements for inclusion - if it DOES NOT, discard the transaction and continue iteration. + 5. If the implementation DOES NOT support enforcement of [Value Locking](#value-locking), skip this step. If enforcement of [Value Locking](#value-locking) is supported, ensure the transaction's fee meets the [Value Locking](#value-locking) requirements for inclusion - if it does not, discard the transaction and continue iteration. + 6. 
Parse the [_Anchor String_](#anchor-string) to derive the _Operation Count_ and _Core Index File CAS URI_. + 7. Use the [`CAS_PROTOCOL`](#cas-protocol) to fetch the [Core Index File](#core-index-file) using the _Core Index File CAS URI_. If the file cannot be located, retain a reference that signifies the need to retry fetch of the file. If the file is successfully retrieved, proceed to the next section on how to [process a Core Index File](#core-index-file-processing) + +### Core Index File Processing + +This sequence of rules and processing steps ****must**** be followed to correctly process a [Core Index File](#core-index-file): + +1. The [Core Index File](#core-index-file) ****MUST NOT**** exceed the [`MAX_CORE_INDEX_FILE_SIZE`](#max-core-index-file-size) - if it does, cease processing, discard the file data, and retain a reference that the file is to be ignored. +2. Decompress the [Core Index File](#core-index-file) in accordance with the implementation's [`COMPRESSION_ALGORITHM`](#compression-algorithm), within the memory allocation limit specified for decompression in accordance with the implementation-defined [`MAX_MEMORY_DECOMPRESSION_FACTOR`](#max-memory-decompression-factor). +3. The [Core Index File](#core-index-file) ****MUST**** validate against the protocol-defined [Core Index File](#core-index-file) schema and construction rules - if it DOES NOT, cease processing, discard the file data, and retain a reference that the whole batch of anchored operations and all its files are to be ignored. 
+ - While this rule is articulated in the [Core Index File](#core-index-file) section of the specification, it should be emphasized to ensure accurate processing: an [Core Index File](#core-index-file) ****MUST NOT**** include multiple operations in the `operations` section of the [Core Index File](#core-index-file) for the same [DID Suffix](#did-suffix) - if any duplicates are found, cease processing, discard the file data, and retain a reference that the whole batch of anchored operations and all its files are to be ignored. +4. If processing of rules 1 and 2 above resulted in successful validation of the Core Index File, initiate retrieval of the [Provisional Index File](#provisional-index-file) via the [`CAS_PROTOCOL`](#cas-protocol) using the [`provisionalIndexFileUri`](#provisional-index-file-property) property's _CAS URI_ value, if the [`provisionalIndexFileUri`](#provisional-index-file-property) property is present. This is only a ****SUGGESTED**** point at which to begin retrieval of the Provisional Index File, not a blocking procedural step, so you may continue with processing before retrieval of the [Provisional Index File](#provisional-index-file) is complete. +5. Iterate the [_Core Index File Create Entries_](#core-index-file-create-entry), and for each entry, process as follows: + 1. Derive the [DID Suffix](#did-suffix) from the values present in the entry. + 2. Ensure the [DID Suffix](#did-suffix) of the operation entry has not been included in another valid operation that was previously processed in the scope of this Core Index File. + 3. Create an entry for the operation within the _Operation Storage_ area relative to the [DID Suffix](#did-suffix). +6. Iterate the [_Core Index File Recovery Entries_](#core-index-file-recovery-entry), and for each entry, process as follows: + 1. 
Ensure the [DID Suffix](#did-suffix) of the operation entry has not been included in another valid operation that was previously processed in the scope of this Core Index File. + 2. Create an entry for the operation within the _Operation Storage_ area relative to the [DID Suffix](#did-suffix). +7. Iterate the [Core Index File](#core-index-file) [_Deactivate Entries_](#core-index-file-deactivate-entry), and for each entry, process as follows: + 1. Ensure the [DID Suffix](#did-suffix) of the operation entry has not been included in another valid operation that was previously processed in the scope of this Core Index File. + 2. Create an entry for the operation within the _Operation Storage_ area relative to the [DID Suffix](#did-suffix). + +### Provisional Index File Processing + +This sequence of rules and processing steps ****must**** be followed to correctly process a Provisional Index File: + +1. The [Provisional Index File](#provisional-index-file) ****MUST NOT**** exceed the [`MAX_PROVISIONAL_INDEX_FILE_SIZE`](#max-provisional-index-file-size) - if it does, cease processing, discard the file data, and retain a reference that the file is to be ignored. +2. Decompress the [Provisional Index File](#provisional-index-file) in accordance with the implementation's [`COMPRESSION_ALGORITHM`](#compression-algorithm), within the memory allocation limit specified for decompression in accordance with the implementation-defined [`MAX_MEMORY_DECOMPRESSION_FACTOR`](#max-memory-decompression-factor). +3. The [Provisional Index File](#provisional-index-file) ****MUST**** validate against the protocol-defined [Provisional Index File](#provisional-index-file) schema and construction rules - if it DOES NOT, cease processing, discard the file data, and retain a reference that all Provisional-type files and their operations are to be ignored. +4. 
If processing of rules 1 and 2 above resulted in successful validation of the Provisional Index File, begin retrieval of the Chunk Files by iterating the `chunks` array and using the [`CAS_PROTOCOL`](#cas-protocol) to fetch each entry's `chunkFileUri` (a _CAS URI_ based on the [`CAS_URI_ALGORITHM`](#cas-uri-algorithm)). This is only a ****SUGGESTED**** point at which to begin retrieval of the [Chunk Files](#chunk-files), not a blocking procedural step, so you may continue with processing before retrieval of the [Chunk Files](#chunk-files) is complete. +5. Iterate the [_Provisional Index File Update Entries_](#provisional-index-file-update-entry), and for each entry, process as follows: + 1. Ensure the [DID Suffix](#did-suffix) of the operation entry has not been included in another valid operation that was previously processed in the scope of the [Provisional Index File](#provisional-index-file) or its parent [Core Index File](#core-index-file). + 2. Create an entry for the operation within the _Operation Storage_ area relative to the [DID Suffix](#did-suffix). +6. If the node is in a [_Light Node_](#light-node) configuration, retain a reference to the [Chunk Files](#chunk-files) relative to the DIDs in the anchored batch for just-in-time fetch of the [Chunk Files](#chunk-files) during DID resolution. + +### Core Proof File Processing + +This sequence of rules and processing steps ****must**** be followed to correctly process an [Core Proof File](#core-proof-file): + +1. The [Core Proof File](#core-proof-file) ****MUST NOT**** exceed the [`MAX_PROOF_FILE_SIZE`](#max-proof-file-size) - if it does, cease processing, discard the file data, and retain a reference that the whole batch of anchored operations and all its files are to be ignored. +2. 
Decompress the [Core Proof File](#core-proof-file) in accordance with the implementation's [`COMPRESSION_ALGORITHM`](#compression-algorithm), within the memory allocation limit specified for decompression in accordance with the implementation-defined [`MAX_MEMORY_DECOMPRESSION_FACTOR`](#max-memory-decompression-factor). +3. The [Core Proof File](#core-proof-file) ****MUST**** validate against the protocol-defined [Core Proof File](#core-proof-file) schema and construction rules - if it DOES NOT, cease processing, discard the file data, and retain a reference that the whole batch of anchored operations and all its files are to be ignored. +4. Iterate any [_Core Proof File Recovery Entries_](#core-proof-file-recovery-entry) and [_Core Proof File Deactivate Entries_](#core-proof-file-deactivate-entry) that may be present, and for each entry, process as follows: + 1. Ensure an operation for the related DID has not been included in another valid operation that was previously processed in the scope of the [Core Proof File](#core-proof-file) or its parent [Core Index File](#core-index-file). + 2. Create an entry, or associate with an existing entry, the proof payload within the _Operation Storage_ area relative to the [DID Suffix](#did-suffix). + +### Provisional Proof File Processing + +This sequence of rules and processing steps ****must**** be followed to correctly process a [Provisional Proof File](#provisional-proof-file): + +1. The [_Provisional Proof File_](#provisional-proof-file) ****MUST NOT**** exceed the [`MAX_PROOF_FILE_SIZE`](#max-proof-file-size) - if it does, cease processing, discard the file data, and retain a reference that all Provisional-type files and their operations are to be ignored. +2. 
Decompress the [Provisional Proof File](#provisional-proof-file) in accordance with the implementation's [`COMPRESSION_ALGORITHM`](#compression-algorithm), within the memory allocation limit specified for decompression in accordance with the implementation-defined [`MAX_MEMORY_DECOMPRESSION_FACTOR`](#max-memory-decompression-factor). +3. The [_Provisional Proof File_](#provisional-proof-file) ****MUST**** validate against the protocol-defined [_Provisional Proof File_](#provisional-proof-file) schema and construction rules - if it DOES NOT, cease processing, discard the file data, and retain a reference that all Provisional-type files and their operations are to be ignored. +4. Iterate any [_Provisional Proof File Update Entries_](#provisional-proof-file-update-entry) that may be present, and for each entry, process as follows: + 1. Ensure an operation for the related DID has not been included in another valid operation that was previously processed in the scope of the [_Provisional Proof File_](#provisional-proof-file) or its parent [Core Index File](#core-index-file). If another previous, valid operation was already processed in the scope of the [_Provisional Proof File_](#provisional-proof-file) or [Core Index File](#core-index-file) for the same DID, do not process the operation and move to the next operation in the array. + 2. Create an entry, or associate with an existing entry, the proof payload within the _Operation Storage_ area relative to the [DID Suffix](#did-suffix). + +### Chunk File Processing + +This sequence of rules and processing steps ****must**** be followed to correctly process a Chunk File chunk: + +1. The [Chunk File](#chunk-file) chunk ****MUST NOT**** exceed the [`MAX_CHUNK_FILE_SIZE`](#max-chunk-file-size) - if it does, cease processing, discard the file data, and retain a reference that the file is to be ignored. +2. 
Decompress the [Chunk File](#chunk-file) in accordance with the implementation's [`COMPRESSION_ALGORITHM`](#compression-algorithm), within the memory allocation limit specified for decompression in accordance with the implementation-defined [`MAX_MEMORY_DECOMPRESSION_FACTOR`](#max-memory-decompression-factor). +3. The [Chunk File](#chunk-file) ****MUST**** validate against the protocol-defined [Chunk File](#chunk-file) schema and construction rules - if it DOES NOT, cease processing, discard the file data, and retain a reference that the file is to be ignored. +4. The [canonicalized](#json-canonicalization-scheme) buffer of each [Chunk File](#chunk-file) delta entry ****must not**** exceed the [`MAX_DELTA_SIZE`](#max-delta-size). If any delta entries exceed the maximum size, cease processing, discard the file data, and retain a reference that the file is to be ignored. +5. In order to process [_Chunk File Delta Entries_](#chunk-file-delta-entry) in relation to the DIDs they are bound to, they must be mapped back to the Create, Recovery, and Update operation entries present in the [Core Index File](#core-index-file) and [Provisional Index File](#provisional-index-file). To create this mapping, concatenate the [_Core Index File Create Entries_](#core-index-file-create-entry), [_Core Index File Recovery Entries_](#core-index-file-recovery-entry), [_Provisional Index File Update Entries_](#provisional-index-file-update-entry) into a single array, in that order, herein referred to as the [Operation Delta Mapping Array](#operation-delta-mapping-array){id="operation-delta-mapping-array"}. Pseudo-code example: + ```js + let mappingArray = [].concat(CREATE_ENTRIES, RECOVERY_ENTRIES, UPDATE_ENTRIES); + ``` +6. With the [Operation Delta Mapping Array](#operation-delta-mapping-array) assembled, iterate the [_Chunk File Delta Entries_](#chunk-file-delta-entry) from 0 index forward, processing each [_Chunk File Delta Entry_](#chunk-file-delta-entry) as follows: + 1. 
Identify the operation entry from the [Operation Delta Mapping Array](#operation-delta-mapping-array) at the same index as the current iteration and determine its [DID Suffix](#did-suffix) (for [_Core Index File Create Entries_](#core-index-file-create-entry), you will need to compute the [DID Suffix](#did-suffix)). This is the DID the current iteration element maps to. + 2. Store the current [_Chunk File Delta Entry_](#chunk-file-delta-entry) relative to its operation entry in the persistent storage area designated for the related [DID Suffix](#did-suffix). + +::: note +The assembly and processing of Chunk Files will change in a future update to the protocol to accommodate the introduction of multiple chunk files. The current protocol version is designed around one Chunk File, but the scaffolding is present to move to multiple Chunk Files as development progresses. +::: diff --git a/docs/v1.1.0/spec/resolution.md b/docs/v1.1.0/spec/resolution.md new file mode 100644 index 000000000..e6c281e6f --- /dev/null +++ b/docs/v1.1.0/spec/resolution.md @@ -0,0 +1,159 @@ + + +## Resolution + +### Operation Compilation + +1. Upon invocation of resolution, retrieve all observed operations for the [DID Unique Suffix](#did-unique-suffix) of the DID URI being resolved. +2. If record of the DID being published has been observed, proceed to Step 3. If there is no observed record of the DID being published, skip all remaining [Operation Compilation](#operation-compilation) steps and process the DID as follows: + + 1. If the DID URI is a [_Short-Form DID URI_](#short-form-did), abort resolution and return _Not Found_. + 2. If the DID URI is a [_Long-Form DID URI_](#long-form-did-uris), process as follows: + 1. Isolate the last colon-separated (`:`) segment of the DID URI. + 2. Using the implementation's [`DATA_ENCODING_SCHEME`](#data-encoding-scheme), decode the value. 
If the values fail to properly decode in accordance with the implementation's [`DATA_ENCODING_SCHEME`](#data-encoding-scheme), abort resolution and return _Unresolvable_. + 3. JSON parse the resulting value, apply the [canonicalization](#json-canonicalization-scheme) algorithm, reencode the resulting value and ensure it is the same as the initial value from Step 1. If the values do not match, abort resolution and return _Unresolvable_. + 4. Use the [Hashing Process](#hashing-process) to generate a hash of the canonicalized [_Create Operation Suffix Data Object_](#create-suffix-data-object) and ensure it matches the [DID Unique Suffix](#did-unique-suffix). If the values do not match, abort resolution and return _Unresolvable_. + 5. Validate the resulting object in accordance with the [_Create Operation Suffix Data Object_](#create-suffix-data-object) schema. If the value is found to be a valid [_Create Operation Suffix Data Object_](#create-suffix-data-object), proceed. If the value fails validation, abort resolution and return _Unresolvable_. + 6. Validate the [_Create Operation Delta Object_](#create-delta-object) (which is present in a [_Chunk File Delta Entry_](#chunk-file-delta-entry) for published, anchored DIDs). If the value is found to be a valid [_Create Operation Delta Object_](#create-delta-object), proceed. If the value fails validation, abort resolution and return _Unresolvable_. + 7. If all steps above are successful, flag the DID as _Unpublished_ and continue to [Create operation processing](#create-operation-processing) as if the values decoded and validated in the steps above represent the only operation associated with the DID. + +3. 
[Constructing the _Operation Hash Map_](#operation-hash-map-construction){id="operation-hash-map-construction"}: generate a [_Create Operation Pool_](#create-operation-pool){id="create-operation-pool"}, which will house references to any Create operations processed in the steps below, and begin iterating through the operations present in the DID's _Operation Storage_ area as follows: + 1. Type-specific operation evaluation: + + - If the entry is a [Create](#create) operation: + + Create a reference for the operation in the [_Create Operation Pool_](#create-operation-pool), ensuring operations are positioned in ascending [`Anchor Time`](#anchor-time) order. + + - If the entry is a [Recovery](#recover) or [Deactivate](#deactivate) operation: + + Hash the [canonicalized](#json-canonicalization-scheme) [IETF RFC 7517](https://tools.ietf.org/html/rfc7517) JWK representation value of the entry's `recoveryKey` property using the [`HASH_PROTOCOL`](#hash-protocol), then hash the resulting hash value again using the [`HASH_PROTOCOL`](#hash-protocol) and let the resulting hash value be the _Map Hash_ entry. + + - If the entry is an [Update](#update) operation: + + Hash the [canonicalized](#json-canonicalization-scheme) [IETF RFC 7517](https://tools.ietf.org/html/rfc7517) JWK representation value of the entry's `updateKey` property using the [`HASH_PROTOCOL`](#hash-protocol), then hash the resulting hash value again using the [`HASH_PROTOCOL`](#hash-protocol) and let the resulting hash value be the _Map Hash_ entry. + + 2. Ensure a key exists in the _Operation Hash Map_ corresponding to the _Map Hash_, and that the corresponding value is an array. If no property exists for the _Map Hash_, create one and let its value be an array. + 3. Insert the entry into the array of the _Map Hash_ at its proper position in ascending [`Anchor Time`](#anchor-time) order. + +4. 
[Create operation processing](#create-operation-processing){id="create-operation-processing"}: If no operations are present in the [_Create Operation Pool_](#create-operation-pool), cease resolution of the DID and return _Unresolvable_. If the [_Create Operation Pool_](#create-operation-pool) contains operation entries, process them as follows: + 1. Store the value of the `recoveryCommitment` property from the entry's [_Create Operation Suffix Data Object_](#create-suffix-data-object) as the _Next Recovery Commitment_ for use in processing the next Recovery operation. + 2. Retrieve the [_Chunk File Delta Entry_](#chunk-file-delta-entry) corresponding to the operation and proceed to Step 3. If the [_Chunk File Delta Entry_](#chunk-file-delta-entry) is not present because the associated [Chunk File](#chunk-files) has not yet been retrieved and processed (i.e. node is a [_Light Node_](#light-node) implementation, file was previously unavailable, etc.), perform the following steps: + 1. Using the [`CAS_PROTOCOL`](#cas-protocol), fetch the [Chunk File](#chunk-files) using the associated _Chunk File URI_. If the file cannot be retrieved, proceed to [recovery and deactivate operation processing](#recovery-deactivate-operation-processing). + 2. Validate the [Chunk File](#chunk-file) using the [Chunk File Processing](#chunk-file-processing) procedure. If the [Chunk File](#chunk-file) is valid, proceed. If the file is invalid, proceed to [recovery and deactivate operation processing](#recovery-deactivate-operation-processing). + 3. Validate the [_Chunk File Delta Entry_](#chunk-file-delta-entry). If the [_Chunk File Delta Entry_](#chunk-file-delta-entry) is invalid, proceed to [Recovery and deactivate operation processing](#recovery-deactivate-operation-processing). + 4.
Generate a hash of the [canonicalized](#json-canonicalization-scheme) [_Chunk File Delta Entry_](#chunk-file-delta-entry) via the [`HASH_PROTOCOL`](#hash-protocol) and ensure the hash matches the value of the [_Create Operation Suffix Data Object_](#create-suffix-data-object) `deltaHash` property. If the values are ****exactly**** equal, proceed, if they are not, proceed to [recovery and deactivate operation processing](#recovery-deactivate-operation-processing). + 5. Store the `updateCommitment` value of the [_Chunk File Delta Entry_](#chunk-file-delta-entry) as the _Next Update Commitment_ for use in processing the next Update operation. + 6. Begin iterating the `patches` array in the [_Chunk File Delta Entry_](#chunk-file-delta-entry), and for each [DID State Patch](#did-state-patch) entry, perform the following steps: + 1. Validate the entry in accordance any requirements imposed by the [Patch Action](#standard-patch-actions) type indicated by the `action` value of the entry. If the entry is valid, proceed, if the entry fails validation, reverse all modifications to the DID's state and proceed to [recovery and deactivate operation processing](#recovery-deactivate-operation-processing). + 2. Apply the patch as directed by the [Patch Action](#standard-patch-actions) type specified by the `action` property. If any part of the patch fails or produces an error, reverse all modifications to the DID's state and proceed to [recovery and deactivate operation processing](#recovery-deactivate-operation-processing). + +5. [Recovery and deactivate operation processing](#recovery-deactivate-operation-processing){id="recovery-deactivate-operation-processing"}: when Create operations have been processed, process any [Recovery](#recover) and [Deactivate](#deactivate) operations that may exist in the _Operation Hash Map_ via the iteration procedure below. 
If no [Recovery](#recover) and [Deactivate](#deactivate) operations are present, proceed to [update operation processing](#update-operation-processing). + 1. If a property is present in the _Operation Hash Map_ that matches the _Next Recovery Commitment_ exactly, process its array of operation entries using the following steps. If no property exists in the _Operation Hash Map_ that matches the _Next Recovery Commitment_ exactly, exit [recovery and deactivate operation processing](#recovery-deactivate-operation-processing) and advance to [update operation processing](#update-operation-processing). + 2. Iterate the array of operation entries forward from 0-index using the process enumerated below until all valid entries are found and processed: + - If the entry is a [Recovery](#recover) operation: + 1. Retrieve the operation's [_Core Proof File Recovery Entry_](#core-proof-file-recovery-entry) and [_Chunk File Delta Entry_](#chunk-file-delta-entry) from the pre-processed [_Core Proof File_](#core-proof-file) and [Chunk File](#chunk-file) associated with the operation and proceed to validation of the entries, or, if the [_Core Proof File_](#core-proof-file) and [Chunk File](#chunk-file) have yet to be retrieved and processed (e.g. the resolving node is in a [_Light Node_](#light-node) configuration), perform the following steps: + 1. Using the [`CAS_PROTOCOL`](#cas-protocol), fetch the [_Core Proof File_](#core-proof-file) and [Chunk File](#chunk-files) using the associated [_Core Proof File URI_](#core-proof-file-uri) and [_Chunk File URI_](#chunk-file-uri). + 2. If the [_Core Proof File_](#core-proof-file) is unable to be retrieved, skip the entry and advance to the next operation. + 3. Validate the [_Core Proof File_](#core-proof-file). If the file is valid, proceed, if the file is invalid, skip the entry and advance to the next operation. + 2. 
Use the `recoveryKey` value of the [_Recovery Operation Signed Data Object_](#recovery-signed-data-object) to validate its JWS signature. If the signature is valid, proceed, if the signature is invalid, skip the entry and iterate forward to the next entry. + 3. Store the `recoveryCommitment` value of the [_Recovery Operation Signed Data Object_](#recovery-signed-data-object) as the new value for the _Next Recovery Commitment_, for use in processing the next [Recovery](#recover) operation. + 4. Validate the [Chunk File](#chunk-file) using the [Chunk File Processing](#chunk-file-processing) procedure. If the [Chunk File](#chunk-file) is valid, proceed, if the file is invalid, advance to the next operation. + 5. Validate the [_Chunk File Delta Entry_](#chunk-file-delta-entry). If the [_Chunk File Delta Entry_](#chunk-file-delta-entry) is valid, proceed, if the entry is invalid, advance to the next operation. + 6. Generate a hash of the [canonicalized](#json-canonicalization-scheme) [_Chunk File Delta Entry_](#chunk-file-delta-entry) via the [`HASH_PROTOCOL`](#hash-protocol) and ensure the hash equals the value of the [_Recovery Operation Signed Data Object_](#recovery-signed-data-object) `deltaHash` property. If the values are exactly equal, proceed, if the values are not exactly equal, advance to the next operation. + 7. Assign the `updateCommitment` value of the [_Chunk File Delta Entry_](#chunk-file-delta-entry) as the new value for the _Next Update Commitment_, for use in processing the next Update operation. If the `updateCommitment` is not present or invalid, advance to the next operation. + 8. Begin iterating the `patches` array in the [_Chunk File Delta Entry_](#chunk-file-delta-entry), and for each [DID State Patch](#did-state-patch) entry, perform the following steps: + 1. Apply the patch as directed by the [Patch Action](#standard-patch-actions) type specified by the `action` property. 
If any part of the patch fails or produces an error, clear all patch modifications, set the DID's Document to reflect an empty state, and advance to the next operation. + + - If the entry is a [Deactivate](#deactivate) operation: + + 1. Retrieve the operation's [_Core Proof File Deactivate Entry_](#core-proof-file-deactivate-entry) from the pre-processed [_Core Proof File_](#core-proof-file) associated with the operation and proceed, or, if the [_Core Proof File_](#core-proof-file) has yet to be retrieved and processed (e.g. the resolving node is in a [_Light Node_](#light-node) configuration), perform the following steps: + 1. Using the [`CAS_PROTOCOL`](#cas-protocol), fetch the [_Core Proof File_](#core-proof-file) using the associated [_Core Proof File URI_](#core-proof-file-uri). + 2. If the [_Core Proof File_](#core-proof-file) is unable to be retrieved, skip the entry and advance to the next operation. + 3. Validate the [_Core Proof File_](#core-proof-file). If the file is valid, proceed, if the file is invalid, skip the entry and advance to the next operation. + 2. Use the `recoveryKey` value of the [_Deactivate Operation Signed Data Object_](#deactivate-signed-data-object) to validate its JWS signature. If the signature is valid, proceed, if the signature is invalid, skip the entry and iterate forward to the next entry. + 3. The [_Deactivate Operation Signed Data Object_](#deactivate-signed-data-object) ****must**** include a `didSuffix` property with a value that is exactly equal to the [DID Suffix](#did-suffix) of the DID being operated on, if the value is not exactly equal, skip the entry and proceed to the next operation. + 4. Let the DID reflect a _Deactivated_ state and process no further operation entries for this DID. + 3. Once all [Recovery](#recover) and [Deactivate](#deactivate) operations have been processed, if the _Next Update Commitment_ value is present, proceed to [update operation processing](#update-operation-processing).
If the _Next Update Commitment_ value is not present or the DID is in a _Deactivated_ state, proceed to [compiled state processing](#compiled-state-processing). + +6. [Update operation processing](#update-operation-processing): if the DID is marked as _Deactivated_ or the _Next Update Commitment_ value is not present, skip [Update](#update) processing and proceed to [compiled state processing](#compiled-state-processing). If the _Next Update Commitment_ value is present and no [Deactivate](#deactivate) operations were successfully processed during [recovery and deactivate operation processing](#recovery-deactivate-operation-processing), process any Update operations that may exist in the _Operation Hash Map_ using the following processing loop: + + 1. If a property is present in the _Operation Hash Map_ that matches the _Next Update Commitment_ exactly, process its array of operation entries using the following steps. If no property exists in the _Operation Hash Map_ that matches the _Next Update Commitment_ exactly, exit [update operation processing](#update-operation-processing) and advance to [compiled state processing](#compiled-state-processing). + 2. Iterate the array of operation entries forward from 0-index using the process enumerated below until all valid entries are found and processed: + + 1. Retrieve the operation's [_Provisional Proof File Update Entry_](#provisional-proof-file-update-entry) and [_Chunk File Delta Entry_](#chunk-file-delta-entry) from the pre-processed [_Provisional Proof File_](#provisional-proof-file) and [Chunk File](#chunk-file) associated with the operation and proceed to validation of the entries, or, if the [_Provisional Proof File_](#provisional-proof-file) and [Chunk File](#chunk-file) have yet to be retrieved and processed (e.g. the resolving node is in a [_Light Node_](#light-node) configuration), perform the following steps: + 1. 
Using the [`CAS_PROTOCOL`](#cas-protocol), fetch the [_Provisional Proof File_](#provisional-proof-file) and [Chunk File](#chunk-files) using the associated [_Provisional Proof File URI_](#provisional-proof-file-uri) and [_Chunk File URI_](#chunk-file-uri). + 2. If the [_Provisional Proof File_](#provisional-proof-file) is unable to be retrieved, skip the entry and advance to the next operation. + 3. Validate the [_Provisional Proof File_](#provisional-proof-file). If the file is valid, proceed, if the file is invalid, skip the entry and advance to the next operation. + 2. Using the revealed `updateKey` JWK value, validate the [_Update Operation Signed Data Object_](#update-signed-data-object) signature. If the signature is valid, proceed, if the signature is invalid, skip the entry and iterate forward to the next entry. + 3. Validate the [_Chunk File_](#chunk-file) and [_Chunk File Delta Entry_](#chunk-file-delta-entry). If the [_Chunk File_](#chunk-file) and [_Chunk File Delta Entry_](#chunk-file-delta-entry) are valid, proceed, if the entry is invalid, skip the entry and iterate forward to the next entry. + 4. Generate a hash of the [canonicalized](#json-canonicalization-scheme) [_Chunk File Delta Entry_](#chunk-file-delta-entry) via the [`HASH_PROTOCOL`](#hash-protocol) and ensure the hash equals the value of the [_Update Operation Signed Data Object_](#update-signed-data-object) `deltaHash` property. If the values are exactly equal, proceed, if they are not, skip the entry and iterate forward to the next entry. + 5. Store the `updateCommitment` value of the [_Chunk File Delta Entry_](#chunk-file-delta-entry) as the _Next Update Commitment_ for use in processing the next Update operation. + 6. Begin iterating the `patches` array in the [_Chunk File Delta Entry_](#chunk-file-delta-entry), and for each [DID State Patch](#did-state-patch) entry, perform the following steps: + 1. 
Apply the patch as directed by the [Patch Action](#standard-patch-actions) type specified by the `action` property. If any of the patches produce an error, reverse all of this operation's patch modifications to the DID state data, while retaining the successful rotation to the next _Next Update Commitment_ value, and iterate forward to the next operation. + +7. [Compiled state processing](#compiled-state-processing){id="compiled-state-processing"}: After the DID's operations have been evaluated in the compilation steps above, the implementation ****MUST**** use the DID's compiled state to generate a valid DID Document in accordance with the [W3C Decentralized Identifiers](https://w3c.github.io/did-core/) specification. If your implementation is designed to produce a different format of state data, ensure it outputs in accordance with the format you are targeting. +8. If the implementation is outputting DID state data as a DID Document, and the DID Document is being rendered in the JSON-LD representation variant, the implementer ****SHOULD**** add an `@base` entry to the document's `@context`, and set the `@base` value to the `id` of the resolved DID. This ensures relative path values in the output DID Document are correctly projected into id-related strings by JSON-LD parsers. +9. Once a valid DID state output has been generated (e.g. a valid DID Document), proceed to the [DID Resolver Output](#did-resolver-output) process if you intend to render the output as a DID Document, in accordance with the [Decentralized Identifier Resolution](https://w3c-ccg.github.io/did-resolution/) specification. + + +### DID Resolver Output + +The following describes how to construct a [Decentralized Identifier Resolution](https://w3c-ccg.github.io/did-resolution/)-compliant _Resolution Result_ based on a DID resolved via the [Operation Compilation](#operation-compilation) process described in the section above.
+ +If the DID was determined to be _Not Found_ or _Unresolvable_, return a response consistent with those states. If the compiled DID was not determined to be _Not Found_ or _Unresolvable_ (per the [Operation Compilation](#operation-compilation) process above), proceed as follows: + +1. Generate a JSON object for the _Resolution Result_, structured in accordance with the [Decentralized Identifier Resolution](https://w3c-ccg.github.io/did-resolution/#example-14-example-did-resolution-result) specification. +2. Set the `didDocument` property of the _Resolution Result_ object to the resolved DID Document generated via the [Operation Compilation](#operation-compilation) process. +5. The _Resolution Result_ object ****MUST**** include a `didDocumentMetadata` property, and its value ****MUST**** be an object composed of the following properties: + ::: example DID Document Metadata + ```json + "didDocumentMetadata": { + "deactivated": true, + "canonicalId": "did:sidetree:EiDyOQbbZAa3aiRzeCkV7LOx3SERjjH93EXoIM3UoN4oWg", + "equivalentId": ["did:sidetree:EiDyOQbbZAa3aiRzeCkV7LOx3SERjjH93EXoIM3UoN4oWg"], + "method": { + "published": true, + "recoveryCommitment": "EiBfOZdMtU6OBw8Pk879QtZ-2J-9FbbjSZyoaA_bqD4zhA", + "updateCommitment": "EiDOrcmPtfMHuwIWN6YoihdeIPxOKDHy3D6sdMXu_7CN0w" + } + } + ``` + ::: + + - `deactivated` - This property ****MUST**** be present if the resolved DID is determined to be in a deactivated state, and it ****MUST**** be set to the boolean value `true`. If the resolved DID is not in a deactivated state, this value ****MUST**** be set to the boolean value `false`. + - `canonicalId` - If canonical representation of the resolved DID exists, the implementation ****MUST**** include the `canonicalId` property, and the presence and value of the `canonicalId` property is determined as follows: + 1. 
Presence and value of the `canonicalId` property: + - If the DID being resolved is a [_Long-Form DID_](#long-form-did-uris) representation and is unpublished, the `canonicalId` property ****MUST NOT**** be included in the `didDocumentMetadata` object. + - If the DID being resolved is a [_Long-Form DID_](#long-form-did-uris) representation and is published, the `canonicalId` property ****MUST**** be included in the `didDocumentMetadata` object, and its value ****MUST**** be the [_Short-Form DID_](#short-form-did) representation. + - If the DID being resolved is a [_Short-Form DID_](#short-form-did) representation and is published, the `canonicalId` property ****MUST**** be included in the `didDocumentMetadata` object, and its value ****MUST**** be the [_Short-Form DID_](#short-form-did) representation. + 2. Inclusion of the canonical DID representation in the `equivalentId` array: + - If under any of the cases above there is a canonical DID representation included for the `canonicalId` property, the canonical DID representation ****MUST**** also be included in the `equivalentId` array. See below for details on the `equivalentId` property. + - `equivalentId` - If equivalent representations of the resolved DID exist, the implementation ****MUST**** include the `equivalentId` property, and the presence and value of the `equivalentId` property is determined as follows: + - If the DID being resolved is a [_Long-Form DID_](#long-form-did-uris) representation, the `equivalentId` property ****MUST**** be included in the `didDocumentMetadata` object, and its array value ****MUST**** include the [_Short-Form DID_](#short-form-did) representation. + - `method` - Its value ****MUST**** be an object composed of the following values: + 1. The object ****MUST**** include a `published` property with a boolean value. 
If the compiled DID state is flagged as _Unpublished_ and/or _Not Found_ (per the [Operation Compilation](#operation-compilation) process), the `published` property ****MUST**** be set to `false`, otherwise, set the value to `true` if a valid anchoring entry was located for the DID. + 2. The object ****MUST**** include an `updateCommitment` property, and its value ****MUST**** be the `updateCommitment` hash value expected to be fulfilled with the next `updateKey` revealed in the next [Update](#update) operation. + 3. The object ****MUST**** include a `recoveryCommitment` property, and its value ****MUST**** be the `recoveryCommitment` hash value expected to be fulfilled with the next `recoveryKey` revealed in the next [Recovery](#recover) operation. + + +#### Unresolvable DIDs + +If a DID is found to be unresolvable, per the logic defined under the [Operation Compilation](#operation-compilation) section, a Sidetree-compliant node ****SHOULD**** return the appropriate error code over the transport of the resolution request. For HTTP, you ****MUST**** return the responses and status codes defined by the [Sidetree API specification](https://identity.foundation/sidetree/api) section on [Resolution](https://identity.foundation/sidetree/api/#sidetree-resolution). + +### Late Publishing + +Sidetree is an eventually strongly consistent, conflict-free state resolution system based on cryptographically signed, delta-based DID operations, which derives its deterministic order of operations from the position of operation entries in a decentralized anchoring system. Unlike the native tokens of a strongly immutable anchoring system (e.g. Bitcoin), DIDs represent unique identifiers that are generally intended to be _non-transferable_. As such, the Sidetree protocol provides no technical mechanism for exchanging ownership of DIDs with 'double-spend' assurance, the way one might do with a fungible cryptocurrency token.
+ +For Sidetree, _non-transferability_ manifests in a distinct way: a DID owner is ultimately in control of their past, present, and future state changes, and can expose state change operations as they choose across the lineage of their DID's operational history. DID owners can create forks within _their own_ DID state history, and nothing forces them to expose DID state operations they anchor. A DID operation anchored in the past, at Time X, can be exposed sometime in the future, at Time Y. This means Sidetree nodes could become aware of past operations that create a change in the lineage of a DID - this is known as _Late Publishing_ of a DID operation. However, due to the _non-transferability_ of DIDs, this condition is isolated to each DID's own state lineage, and resolved by Sidetree's deterministic ruleset, which guarantees only one fork of a DID's state history can ever be valid. To better understand this, consider the following diagram that illustrates a DID owner, Alice, creating forks by creating and anchoring operations in the past that she does not expose to the network: + +```mermaid +graph TB + 0 --> 1 + 1 --> 2a + 1 --> 2b + 2b --> 3 +``` + +As you can see above, Alice has created a fork by anchoring the divergent operations `2a` and `2b`. Let us assume Alice refrains from publishing the [CAS files](#file-structures) that other Sidetree nodes would detect to locate and replicate the data for operation `2a`, and further, assume Alice continues creating more operation history stemming from operation `2b`. Whenever Alice exposes the DID operation data for `2a`, other Sidetree nodes will need to decide which operation between `2a` and `2b` is the 'right' operation. The Sidetree protocol includes a strict rule that resolves this conflict, and any variation of it: the earliest operation in [Anchor Time](#anchor-time) always wins.
If operation `2a` precedes operation `2b` in [Anchor Time](#anchor-time), whenever she decides to publish operation `2a`, all other Sidetree nodes would process the operation and immediately deem operation `2a` to be the valid, correct operational fork. This remains true even if Alice continues building operational history stemming from operation `2b` any amount of time into the future. + +With this example of _late publishing_ in mind, the most important aspect to remember is that DID owners decide what the PKI state of their DIDs should be, and remain in control of that state independent of the shape of their DID operational history. The net takeaway is that regardless of how a DID owner decides to update the state of their DID, the decision over what that state is remains entirely their choice. diff --git a/docs/v1.1.0/spec/terminology.md b/docs/v1.1.0/spec/terminology.md new file mode 100644 index 000000000..c4712e21d --- /dev/null +++ b/docs/v1.1.0/spec/terminology.md @@ -0,0 +1,32 @@ +## Terminology + +| Term | Description | +|-----------------------|--------------------------------------------------------------------------------| +| Anchoring System { #anchoring-system } | A decentralized sequencing oracle (e.g. Bitcoin, Ethereum, distributed ledgers, witness-based approaches) that can be used to determine the order of PKI state transformations for Decentralized Identifiers (DIDs), which can be deterministically verified to derive the current PKI state of DIDs. | +| Witness System { #witness-system } | Synonym for [Anchoring System](#anchoring-system), see above. | +| Core Index File | JSON Document containing proving and index data for Create, Recovery, and Deactivate operations, and a CAS URI for the associated Provisional Index File. This file is anchored to the target anchoring system. | +| Provisional Index File | JSON Document containing Update operation proving and index data, as well as CAS URI for Chunk File chunks. 
| +| Core Proof File | JSON Document containing the cryptographic proofs for Recovery and Deactivate operations, which form the persistent backbone of DID PKI lineages. | +| Provisional Proof File | JSON Document containing the cryptographic proofs for Update operations, which can be pruned via decentralized checkpointing mechanisms (this mechanism will arrive in future versions of the Sidetree protocol). | +| Chunk File | JSON Document containing all verbose operation data for the corresponding set of DIDs specified in the related Provisional Index File. | +| CAS { #cas } | Content-addressable storage protocol/network (e.g. IPFS) | +| CAS URI { #cas-uri } | The unique content-bound identifier used to locate a resource via the [CAS](#cas) protocol/network (e.g. IPFS) | +| Commit Value { #commit-value } | A chosen value that is used with a [commitment scheme](#commitment-scheme) | +| Commitment { #commitment } | The output of a [commitment scheme](#commitment-scheme) | +| Commitment Scheme { #commitment-scheme } | A cryptographic primitive that allows one to commit to a chosen value, known as the [commit value](#commit-value) resulting in the generation of a [commitment](#commitment). A [commitment](#commitment) can then be shared without revealing the [commit value](#commit-value) forming a `proof of commitment` where the possessor of the [commit value](#commit-value) can then later reveal the [commit value](#commit-value) proving the original commitment. | +| DID Document | JSON Document containing public key references, service endpoints, and other PKI metadata that corresponds to a given DID (as defined in the [W3C DID Specification](https://w3c.github.io/did-core/)). This is the most common form of DID state used in Sidetree implementations. | +| DID Suffix { #did-suffix } | The unique identifier string within a DID URI. e.g. The unique suffix of `did:sidetree:123` would be `123`. | +| DID Suffix Data | Data required to deterministically generate a DID.
| +| Multihash { #multihash } | Protocol for differentiating outputs from common cryptographic hash functions, addressing size and encoding considerations: https://multiformats.io/multihash/ | +| DID Operation | Set of delta-based CRDT patches that modify a DID's state data when applied. | +| Operation Request | JWS formatted request sent to a _Sidetree Node_ to include a _DID Operation_ in a batch of operations. | +| Update Key Pair {#update-key-pair}| A cryptographic key used to produce an _Update Request_ JWS. Public key representation MUST be used to produce _Update Request_ commitment. | +| Recovery Key Pair {#recovery-key-pair} | A cryptographic key used to produce an _Operation Request_ of type Recover or Deactivate. Public key representation MUST be used to produce _Operation Request_ commitment. | +| Public Key Commitment { #public-key-commitment } | The resulting [commitment](#commitment) obtained by applying the defined [commitment scheme](#operation-commitment-scheme) to a public key | +| Recovery Commitment { #recovery-commitment } | The resulting [commitment](#commitment) obtained by applying the defined [commitment scheme](#recovery-commitment-scheme) to the public key of a [recovery key pair](#recovery-key-pair) | +| Sidetree Node | Executable code that implements all the required components, functionality, and rules specified in the Sidetree protocol specification. | +| Transaction | Anchoring System transaction that anchors a set of Sidetree operations, via a CAS URI for an associated Core Index File. | +| Anchor String | The string anchored to the anchoring system, composed of the CAS URI to the [Core Index File](#core-index-file), prefixed with the declared operation count. | +| Anchor Time { #anchor-time } | The logical order of operations, as determined by the underlying anchoring system (e.g. Bitcoin block and transaction order). 
Anchoring systems may widely vary in how they determine the logical order of operations, but the only requirement of an anchoring system is that it can provide a means to deterministically order each operation within a DID's operational lineage. | +| Transaction Number { #transaction-number } | A monotonically increasing number deterministically ordered and assigned to every transaction relative to its position in [Anchor Time](#anchor-time). | +| Light Node { #light-node } | A node that downloads and processes only [Core Index Files](#core-index-file) and [Provisional Index Files](#provisional-index-file) on a proactive basis, waiting until resolution time to download and process the [Chunk File](#chunk-files) related to a given DID. This type of configuration enables a node to operate trustlessly while consuming approximately one order of magnitude less storage. | diff --git a/docs/v1.1.0/spec/title.md b/docs/v1.1.0/spec/title.md new file mode 100644 index 000000000..16fabfa45 --- /dev/null +++ b/docs/v1.1.0/spec/title.md @@ -0,0 +1,31 @@ +Sidetree v1.1.0 +================== + +**Specification Status:** DIF Ratified Specification + +**Latest published version:** + [identity.foundation/sidetree/spec](https://identity.foundation/sidetree/spec) + +**Editors:** +~ [Daniel Buchner](https://www.linkedin.com/in/dbuchner/) (Microsoft) +~ [Orie Steele](https://www.linkedin.com/in/or13b/) (Transmute) +~ [Troy Ronda](https://www.linkedin.com/in/troyronda/) (SecureKey) + +**Contributors:** +~ [Henry Tsai](https://www.linkedin.com/in/henry-tsai-6b884014/) (Microsoft) +~ [Mudassir Ali](https://www.linkedin.com/in/mudassir-ali-4981654/) (Microsoft) +~ [Guillaume Dardelet](https://www.linkedin.com/in/guillaume-dardelet/) (Transmute) +~ [Isaac Chen](https://www.linkedin.com/in/isaac-chen-921079127/) (Microsoft) +~ [Christian Lundkvist](https://www.linkedin.com/in/chrislun/) (Consensys) +~ [Kyle Den Hartog](https://www.linkedin.com/in/kyledenhartog/) (Mattr) +~ [Tobias
Looker](https://www.linkedin.com/in/tplooker/) (Mattr) + +**Participate:** +~ [GitHub repo](https://github.com/decentralized-identity/sidetree) +~ [File a bug](https://github.com/decentralized-identity/sidetree/issues) +~ [Commit history](https://github.com/decentralized-identity/sidetree/commits/master) + +**Sidetree REST API specification:** + [identity.foundation/sidetree/api](https://identity.foundation/sidetree/api) + +------------------------------------ diff --git a/docs/v1.1.0/spec/topology.md b/docs/v1.1.0/spec/topology.md new file mode 100644 index 000000000..ceccfaf1d --- /dev/null +++ b/docs/v1.1.0/spec/topology.md @@ -0,0 +1,9 @@ +## Network Topology + +The figure below illustrates the three primary components of a Sidetree-based DID overlay network: + +1. The underlying anchoring system that serves as the global anchoring and linear sequencing system for DID operations. +2. The Sidetree nodes themselves, which interact with the anchoring system to anchor operations, fetch and replicate data from the CAS network, and process operations in accordance with the protocol deterministic ruleset. +3. An integrated Content-Addressable Storage (CAS) network layer Sidetree nodes use to distribute and replicate DID operation files. + + diff --git a/docs/v1.1.0/spec/versioning.md b/docs/v1.1.0/spec/versioning.md new file mode 100644 index 000000000..2a1527281 --- /dev/null +++ b/docs/v1.1.0/spec/versioning.md @@ -0,0 +1,37 @@ +## Protocol Versioning + +The rules and parameters of the Sidetree protocol MAY change in the future, resulting in new versions of the specification. The Sidetree specification and reference implementation follow [SemVer 2.0](https://semver.org/). 
+ +Versions of the specification can be found on the Decentralized Identity Foundation's website at the following version-based paths: + +**Latest Draft** + +```html +https://identity.foundation/sidetree/spec/ +``` + +**Specific Versions** + +```html +https://identity.foundation/sidetree/spec/v<major>.<minor>.<patch>/ +``` + +Versions of the Sidetree reference implementation are also provided as npm modules and [GitHub releases](https://github.com/decentralized-identity/sidetree/releases): + + +```json +{ + "name": "@decentralized-identity/sidetree", + "version": "<major>.<minor>.<patch>", + ... +``` + +### Version Segment Definitions + +- **Major:** Major protocol evolution, with breaking protocol advancements so large they warrant incrementing the major version. +- **Minor:** Critical updates, protocol forking changes, or security patches that require all nodes to upgrade. +- **Patch:** Non-critical changes that do not require nodes to upgrade. + +### New Version Activation + +New versions of the protocol, or modifications to parameter values by implementers, ****MUST**** be activated at a specified [_Anchor Time_](#anchor-time) so all nodes can remain in sync by enforcing the same parameter configuration and protocol rules at the same logical starting point. All transactions that occur after the specified [_Anchor Time_](#anchor-time) will adhere to the associated version's rules and parameters until a newer version of the protocol is defined and implemented at a future [_Anchor Time_](#anchor-time). \ No newline at end of file diff --git a/docs/v1.1.0/styleguide.md b/docs/v1.1.0/styleguide.md new file mode 100644 index 000000000..39a5f3992 --- /dev/null +++ b/docs/v1.1.0/styleguide.md @@ -0,0 +1,12 @@ +# Style Guide + +The sidetree specification relies on JSON and the current node implementations rely on HTTP. + +Unless otherwise noted, the following style guides should be used.
+ +- [Microsoft API Guidelines](https://github.com/microsoft/api-guidelines/blob/vNext/Guidelines.md) +- [Google JSON Style Guide](https://google.github.io/styleguide/jsoncstyleguide.xml) +- [JSON API](https://jsonapi.org/recommendations) +- [RESTful API Guidelines](https://restfulapi.net/) + +The reference implementation also makes use of a custom eslint configuration; please be sure to follow the lint rules that are set for this project. \ No newline at end of file diff --git a/docs/v1.1.0/type-registry.md b/docs/v1.1.0/type-registry.md new file mode 100644 index 000000000..6f6d38f74 --- /dev/null +++ b/docs/v1.1.0/type-registry.md @@ -0,0 +1,15 @@ +# Sidetree DID Type Registry + +The following table is a registry of DID type strings that can be used in Sidetree implementations that support the type declaration property noted in the [Create Operation Suffix Data Object](https://identity.foundation/sidetree/spec/#create-suffix-data-object). + +The registry is predicated on mapping well-known schema-defined objects to terse byte strings. All types are of non-human entities, objects, and things. To propose additions to the list, file an Issue with this repo and add the `did-type` and `feature` tags.
+ + +| Type Name | Schema | Type String | +|-------------------------|-------------------------------------------|:------------| +| Organization | https://schema.org/Organization | 0001 | +| Government Organization | https://schema.org/GovernmentOrganization | 0002 | +| Corporation | https://schema.org/Corporation | 0003 | +| Local Business | https://schema.org/LocalBusiness | 0004 | +| Software Package | https://schema.org/SoftwareSourceCode | 0005 | +| Web App | https://schema.org/WebApplication | 0006 | diff --git a/specs.json b/specs.json index 14cf06b48..370d2c3ac 100644 --- a/specs.json +++ b/specs.json @@ -4,7 +4,57 @@ "resource_path": "./www", "specs": [ { - "title": "DIF Sidetree Protocol", + "title": "DIF Sidetree Protocol v1.0.1", + "logo": "https://rawcdn.githack.com/decentralized-identity/decentralized-identity.github.io/a3ca39717e440302d1fd99a796e7f00e1c42eb2d/images/logo-flat.svg", + "logo_link": "https://identity.foundation", + "source": { + "host": "github", + "account": "decentralized-identity", + "repo": "sidetree" + }, + "output_path": "./www/spec/v1.0.1", + "spec_directory": "./docs/v1.0.1/spec", + "markdown_paths": [ + "title.md", + "abstract.md", + "intro.md", + "terminology.md", + "versioning.md", + "parameters.md", + "common-functions.md", + "topology.md", + "file-structures.md", + "did-uri.md", + "json-web-signatures.md", + "operations.md", + "patches.md", + "processing.md", + "pof.md", + "resolution.md", + "method-versioning.md", + "context.md", + "guidelines.md", + "appendix.md" + ] + }, + { + "title": "DIF Sidetree REST API v1.0.1", + "logo": "https://rawcdn.githack.com/decentralized-identity/decentralized-identity.github.io/a3ca39717e440302d1fd99a796e7f00e1c42eb2d/images/logo-flat.svg", + "logo_link": "https://identity.foundation", + "source": { + "host": "github", + "account": "decentralized-identity", + "repo": "sidetree" + }, + "output_path": "./www/api/v1.0.1", + "spec_directory": "./docs/v1.0.1/api", + "markdown_paths": [ + 
"title.md", + "content.md" + ] + }, + { + "title": "DIF Sidetree Protocol v1.1.0", "logo": "https://rawcdn.githack.com/decentralized-identity/decentralized-identity.github.io/a3ca39717e440302d1fd99a796e7f00e1c42eb2d/images/logo-flat.svg", "logo_link": "https://identity.foundation", "source": { @@ -13,7 +63,7 @@ "repo": "sidetree" }, "output_path": "./www/spec", - "spec_directory": "./docs/spec", + "spec_directory": "./docs/v1.1.0/spec", "markdown_paths": [ "title.md", "abstract.md", @@ -38,7 +88,7 @@ ] }, { - "title": "DIF Sidetree REST API", + "title": "DIF Sidetree REST API v1.1.0", "logo": "https://rawcdn.githack.com/decentralized-identity/decentralized-identity.github.io/a3ca39717e440302d1fd99a796e7f00e1c42eb2d/images/logo-flat.svg", "logo_link": "https://identity.foundation", "source": { @@ -47,7 +97,7 @@ "repo": "sidetree" }, "output_path": "./www/api", - "spec_directory": "./docs/api", + "spec_directory": "./docs/v1.1.0/api", "markdown_paths": [ "title.md", "content.md" diff --git a/www/api/v1.0.1/index.html b/www/api/v1.0.1/index.html new file mode 100644 index 000000000..e21b446cd --- /dev/null +++ b/www/api/v1.0.1/index.html @@ -0,0 +1,312 @@ + + + + + + + + + DIF Sidetree REST API v1.0.1 + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+

§ Sidetree REST API

+

Specification Status: Editor’s Draft

+

Latest published version: +identity.foundation/sidetree/api

+
+
Editors:
+
Troy Ronda (SecureKey)
+
Henry Tsai (Microsoft)
+
Contributors:
+
Mudassir Ali (Microsoft)
+
Isaac Chen (Microsoft)
+
Kyle Den Hartog (Mattr)
+
Daniel Buchner (Microsoft)
+
Orie Steele (Transmute)
+
Participate:
+
GitHub repo
+
File a bug
+
Commit history
+
+

Sidetree protocol specification: +identity.foundation/sidetree/spec

+
+

§ REST API

+

The following sections define the Sidetree resolution and operations endpoints. Please refer to the companion Sidetree REST API specification for additional information, as well as REST API definitions for the anchoring and CAS components.

+

§ Sidetree Resolution

+

Sidetree resolution requests to the REST API are based on the DID Resolution HTTP(S) binding. +Resolution requests consist of a DID and MAY include DID parameters. +As detailed in Resolution, the resolution request MAY include the initial state DID parameter.

+

The server responds with the DID Resolution Result composed of the DID Document and Method Metadata. +Sidetree defines published, updateCommitment, and recoveryCommitment method metadata.

+ +
EXAMPLE
{
+    "@context": "https://w3id.org/did-resolution/v1",
+    "didDocument": DID_DOCUMENT_OBJECT,
+    "didDocumentMetadata": {
+        "method": {
+            "published": boolean,
+            "updateCommitment": UPDATE_COMMITMENT,
+            "recoveryCommitment": RECOVERY_COMMITMENT
+        }
+    }
+}
+
+
+

A resolution is requested as follows:

+
    +
  1. The client MUST send a GET to the Sidetree resolution endpoint /identifiers/{did-with-or-without-initial-state} under the desired REST server path.
  2. +
  3. If the DID does not exist and initial state was not provided: +
      +
    • The server MUST respond with HTTP Status Code 404.
    • +
    +
  4. +
  5. If the DID does not exist and valid initial state was provided: +
      +
    • The server MUST respond with HTTP Status Code 200.
    • +
    • The server MUST include the didDocument property, with its value set to the initial DID document that is constructed from the initial state.
    • +
    • The server MUST include the resolution response object didDocumentMetadata composed of a method object, which includes a published property with value false.
    • +
    +
  6. +
  7. If the DID does exist and has not been deactivated: +
      +
    • The server MUST respond with HTTP Status Code 200.
    • +
    • The server MUST include the didDocument property, with its value set to the latest DID document.
    • +
    • The server MUST include the resolution response object didDocumentMetadata composed of a method object which includes a published property with value true.
    • +
    +
  8. +
  9. If the DID does exist and has been deactivated: +
      +
    • The server MUST respond with HTTP Status Code 200.
    • +
    • The server MUST include the didDocument property, with its value set to a valid empty DID document including the populated id property.
    • +
    • The server MUST include the resolution response object didDocumentMetadata which includes a deactivated property with value true.
    • +
    +
  10. +
  11. Otherwise, for failure, the server MUST respond with an appropriate HTTP Status Code (400, 401, 404, 500).
  12. +
+

§ Sidetree Operations

+

Sidetree operation requests to the REST API consist of a type property indicating the operation to be performed along with operation-specific properties and data.

+
EXAMPLE
{
+    "type": OPERATION_TYPE,
+    ...
+}
+
+
+

A valid Sidetree Operation Request is a JSON document composed as follows:

+
    +
  1. The Operation Request MUST contain a type property, and its value MUST be a valid operation defined in +File Structure. The defined operations are create, recover, deactivate, update.
  2. +
  3. Populate additional properties according to the appropriate subsection.
  4. +
  5. The client MUST POST the Operation Request JSON document to the Sidetree operation endpoint /operations under the desired REST server path.
  6. +
  7. The server MUST respond with HTTP status 200 when successful. Otherwise, for failure, the server MUST respond with an appropriate HTTP Status Code (400, 401, 404, 500). +
      +
    • In the case of a successful create operation, the server MUST return the DID Resolution Result for the DID as is detailed in Sidetree Resolution.
    • +
    +
  8. +
+

§ Create

+
EXAMPLE
{
+    "type": "create",
+    "suffixData": SUFFIX_DATA_OBJECT,
+    "delta": DELTA_OBJECT
+}
+
+
+

Use the following process to generate a Sidetree create operation JSON document for the REST API, composed as follows:

+
    +
  1. The object MUST contain a type property, and its value MUST be create.
  2. +
  3. The object MUST contain a suffixData property, and its value must be a Suffix Data Object.
  4. +
  5. The object MUST contain a delta property, and its value must be a Create Operation Data Object.
  6. +
+

§ Update

+
EXAMPLE
{
+    "type": "update",
+    "didSuffix": SUFFIX_STRING,
+    "revealValue": REVEAL_VALUE,
+    "delta": DELTA_OBJECT,
+    "signedData": JWS_SIGNED_VALUE
+}
+
+
+

Use the following process to generate a Sidetree update operation JSON document for the REST API, composed as follows:

+
    +
  1. The object MUST contain a type property, and its value MUST be update.
  2. +
  3. The object MUST contain a didSuffix property, and its value MUST be the DID Suffix of the DID the operation pertains to.
  4. +
  5. The object MUST contain a revealValue property, and its value MUST be the reveal value of the DID the operation pertains to.
  6. +
  7. The object MUST contain a delta property, and its value MUST be an Update Operation Delta Object.
  8. +
  9. The object MUST contain a signedData property, and its value MUST be an IETF RFC 7515 compliant JWS Compact +Serialization of the Update operation as defined in Provisional Index File.
  10. +
+

§ Recover

+
EXAMPLE
{
+    "type": "recover",
+    "didSuffix": SUFFIX_STRING,
+    "revealValue": REVEAL_VALUE,
+    "delta": DELTA_OBJECT,
+    "signedData": JWS_SIGNED_VALUE
+}
+
+
+

Use the following process to generate a Sidetree recovery operation JSON document for the REST API, composed as follows:

+
    +
  1. The object MUST contain a type property, and its value MUST be recover.
  2. +
  3. The object MUST contain a didSuffix property, and its value MUST be the DID Suffix of the DID the operation pertains to.
  4. +
  5. The object MUST contain a revealValue property, and its value MUST be the reveal value of the DID the operation pertains to.
  6. +
  7. The object MUST contain a delta property, and its value MUST be a Recovery Operation Delta Object.
  8. +
  9. The object MUST contain a signedData property, and its value MUST be an IETF RFC 7515 compliant JWS Compact +Serialization of the Recovery operation as defined in Core Index File.
  10. +
+

§ Deactivate

+
EXAMPLE
{
+    "type": "deactivate",
+    "didSuffix": SUFFIX_STRING,
+    "revealValue": REVEAL_VALUE,
+    "signedData": JWS_SIGNED_VALUE
+}
+
+
+

Use the following process to generate a Sidetree deactivate operation JSON document for the REST API, composed as follows:

+
    +
  1. The object MUST contain a type property, and its value MUST be deactivate.
  2. +
  3. The object MUST contain a didSuffix property, and its value MUST be the DID Suffix of the DID the operation pertains to.
  4. +
  5. The object MUST contain a revealValue property, and its value MUST be the reveal value of the DID the operation pertains to.
  6. +
  7. The object MUST contain a signedData property, and its value MUST be an IETF RFC 7515 compliant JWS Compact +Serialization of the Deactivate operation as defined in Core Index File.
  8. +
+ +
+ +
+ + + +
+ + + + + +
+
    +
    + + +
    + Table of Contents + +
    + +
    + +
    + + + + + + + \ No newline at end of file diff --git a/www/spec/v1.0.1/index.html b/www/spec/v1.0.1/index.html new file mode 100644 index 000000000..905870a87 --- /dev/null +++ b/www/spec/v1.0.1/index.html @@ -0,0 +1,2250 @@ + + + + + + + + + DIF Sidetree Protocol v1.0.1 + + + + + + + + + + + + + + + + + + + + + +
    + + + +
    +

    § Sidetree v1.0.1

    +

    Specification Status: DIF Ratified Specification

    +

    Latest published version: +identity.foundation/sidetree/spec

    +
    +
    Editors:
    +
    Daniel Buchner (Microsoft)
    +
    Orie Steele (Transmute)
    +
    Troy Ronda (SecureKey)
    +
    Contributors:
    +
    Henry Tsai (Microsoft)
    +
    Mudassir Ali (Microsoft)
    +
    Guillaume Dardelet (Transmute)
    +
    Isaac Chen (Microsoft)
    +
    Christian Lundkvist (Consensys)
    +
    Kyle Den Hartog (Mattr)
    +
    Tobias Looker (Mattr)
    +
    Participate:
    +
    GitHub repo
    +
    File a bug
    +
    Commit history
    +
    +

    Sidetree REST API specification: +identity.foundation/sidetree/api

    +
    +

    § Abstract

    +

    Sidetree is a protocol for creating scalable Decentralized Identifier networks that can run atop any existing decentralized anchoring system (e.g. Bitcoin, Ethereum, distributed ledgers, witness-based approaches) and be as open, public, and permissionless as the underlying anchoring systems they utilize. The protocol allows users to create globally unique, user-controlled identifiers and manage their associated PKI metadata, all without the need for centralized authorities or trusted third parties. The syntax of the identifier and accompanying data model used by the protocol is conformant with the W3C Decentralized Identifiers specification. Implementations of the protocol can be codified as their own distinct DID Methods and registered in the W3C DID Method Registry.

    +

    § Introduction

    +

    This section is non-normative

    +

    Decentralized ledgers (e.g. Bitcoin) introduced the first-ever solution to the chronological oracle problem, which unlocked the ability to create robust decentralized identifier networks. However, current approaches that utilize event anchoring systems to create decentralized identifier networks suffer from severely limited transactional volumes and other performance issues. Sidetree is a ‘Layer 2’ protocol that can be implemented atop any form of event anchoring system to enable scalable W3C Decentralized Identifier (DID) implementations that can be fully open, public, and permissionless. Sidetree is able to do all this without requiring trusted intermediaries, centralized authorities, special protocol tokens, or secondary consensus mechanisms, while preserving the core attributes of decentralization and immutability of the underlying anchoring systems it is implemented on.

    +

    Architecturally, Sidetree-based DID Method implementations are overlay networks composed of independent peer nodes (Sidetree nodes) that interact with an underlying decentralized anchoring system (as illustrated under Network Topology) to write, observe, and process replicated DID PKI state operations using deterministic protocol rules that produce an eventually strongly consistent view of all DIDs in the network. The Sidetree protocol defines a core set of DID PKI state change operations, structured as delta-based Conflict-Free Replicated Data Types (i.e. Create, Update, Recover, or Deactivate), that mutate a Decentralized Identifier’s DID Document state. Sidetree nodes that participate in writing operations into the overlay network do so by anchoring Content-Addressable Storage (CAS) (e.g. IPFS) references to aggregated bundles of operations in an underlying anchoring system. The anchoring system acts as a linear chronological sequencing oracle, which the protocol leverages to order DID PKI operations in an immutable history all observing nodes can replay and validate. It is this ability to replay the precise sequence of DID PKI state change events, and process those events using a common set of deterministic rules, that allows Sidetree nodes to achieve a consistent view of DIDs and their DID Document states, without requiring any additional consensus mechanism.

    +

    § Terminology

    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    TermDescription
    Anchoring SystemA decentralized sequencing oracle (e.g. Bitcoin, Ethereum, distributed ledgers, witness-based approaches) that can be used to determine the order of PKI state transformations for Decentralized Identifiers (DIDs), which can be deterministically verified to derive the current PKI state of DIDs.
    Witness System Synonym for Anchoring System, see above.
    Core Index FileJSON Document containing proving and index data for Create, Recovery, and Deactivate operations, and a CAS URI for the associated Provisional Index File. This file is anchored to the target anchoring system.
    Provisional Index FileJSON Document containing Update operation proving and index data, as well as CAS URI for Chunk File chunks.
    Core Proof FileJSON Document containing the cryptographic proofs for Recovery and Deactivate operations, which form the persistent backbone of DID PKI lineages.
    Provisional Proof FileJSON Document containing the cryptographic proofs for Update operations, which can be pruned via decentralized checkpointing mechanisms (this mechanism will arrive in future versions of the Sidetree protocol).
    Chunk FileJSON Document containing all verbose operation data for the corresponding set of DIDs specified in the related Provisional Index File.
    CASContent-addressable storage protocol/network (e.g. IPFS)
    CAS URIThe unique content-bound identifier used to locate a resource via the CAS protocol/network (e.g. IPFS)
    Commit ValueA chosen value that is used with a commitment scheme
    CommitmentThe output of a commitment scheme
    Commitment SchemeA cryptographic primitive that allows one to commit to a chosen value, known as the commit value resulting in the generation of a commitment. A commitment can then be shared without revealing the commit value forming a proof of commitment where the possessor of the commit value can then later reveal the commit value proving the original commitment.
    DID DocumentJSON Document containing public key references, service endpoints, and other PKI metadata that corresponds to a given DID (as defined in the W3C DID Specification). This is the most common form of DID state used in Sidetree implementations.
    DID SuffixThe unique identifier string within a DID URI. e.g. The unique suffix of did:sidetree:123 would be 123.
    DID Suffix DataData required to deterministically generate a DID.
    Multihash Protocol for differentiating outputs from common cryptographic hash functions, addressing size and encoding considerations: https://multiformats.io/multihash/
    DID OperationSet of delta-based CRDT patches that modify a DID’s state data when applied.
    Operation RequestJWS formatted request sent to a Sidetree Node to include a DID Operation in a batch of operations.
    Update Key PairA cryptographic key used to produce an Update Request JWS. Public key representation MUST be used to produce Update Request commitment.
    Recovery Key PairA cryptographic key used to produce an Operation Request of type Recover or Deactivate. Public key representation MUST be used to produce Operation Request commitment.
    Public Key CommitmentThe resulting commitment obtained by applying the defined commitment scheme to a public key
    Recovery CommitmentThe resulting commitment obtained by applying the defined commitment scheme to the public key of a recovery key pair
    Sidetree NodeExecutable code that implements all the required components, functionality, and rules specified in the Sidetree protocol specification.
    TransactionAnchoring System transaction that anchors a set of Sidetree operations, via a CAS URI for an associated Core Index File.
    Anchor StringThe string anchored to the anchoring system, composed of the CAS URI to the Core Index File, prefixed with the declared operation count.
    Anchor TimeThe logical order of operations, as determined by the underlying anchoring system (e.g. Bitcoin block and transaction order). Anchoring systems may widely vary in how they determine the logical order of operations, but the only requirement of an anchoring system is that it can provide a means to deterministically order each operation within a DID’s operational lineage.
    Transaction Number A monotonically increasing number deterministically ordered and assigned to every transaction relative to its position in Anchor Time.
    Light Node A node that downloads and processes only Core Index Files and Provisional Index Files on a proactive basis, waiting until resolution time to download and process the Chunk File related to a given DID. This type of configuration enables a node to operate trustlessly while consuming approximately one order of magnitude less storage.
    +

    § Protocol Versioning

    +

    The rules and parameters of the Sidetree protocol MAY change in the future, resulting in new versions of the specification. The Sidetree specification and reference implementation follow SemVer 2.0.

    +

    Versions of the specification can be found on the Decentralized Identity Foundation’s website at the following version-based paths:

    +

    Latest Draft

    +
    https://identity.foundation/sidetree/spec/
    +
    +

    Specific Versions

    +
    https://identity.foundation/sidetree/spec/v<major>.<minor>.<patch>/
    +
    +

    Versions of the Sidetree reference implementation are also provided as npm modules and GitHub releases:

    +
    {
    +  "name": "@decentralized-identity/sidetree",
    +  "version": "<major>.<minor>.<patch>",
    +  ...
    +
    +

    § Version Segment Definitions

    +
      +
    • Major: Major protocol evolution, with breaking protocol advancements so large they warrant incrementing the major version.
    • +
    • Minor: Critical updates, protocol forking changes, or security patches that require all nodes to upgrade.
    • +
    • Patch: Non-critical changes that do not require nodes to upgrade.
    • +
    +

    § New Version Activation

    +

    New versions of the protocol, or modifications to parameter values by implementers, MUST be activated at a specified Anchor Time so all nodes can remain in sync by enforcing the same parameter configuration and protocol rules at the same logical starting point. All transactions that occur after the specified Anchor Time will adhere to the associated version’s rules and parameters until a newer version of the protocol is defined and implemented at a future Anchor Time.

    +

    § Default Parameters

    +

    Each version of the protocol will define a set of protocol rules and parameters with default suggested values. The following are the parameters used by this version of the Sidetree protocol - implementers MAY choose different values than the defaults listed below:

    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    Protocol ParameterDescriptionSuggested Defaults
    HASH_ALGORITHMAlgorithm for generating hashes of protocol-related values.SHA256
    HASH_PROTOCOLProtocol for generating hash representations in Sidetree implementations, using the HASH_ALGORITHMMultihash
    DATA_ENCODING_SCHEMEEncoding selected for various data (JSON, hashes, etc.) used within an implementation, the output of which MUST be in ASCII format.Base64URL
    JSON_CANONICALIZATION_SCHEMEThe scheme selected for canonicalizing JSON structures used throughout the specification.JCS
    KEY_ALGORITHMAsymmetric public key algorithm for signing DID operations. Must be a valid JWK crv.secp256k1
    SIGNATURE_ALGORITHMAsymmetric public key signature algorithm. Must be a valid JWS alg.ES256K
    CAS_PROTOCOLThe CAS network protocol used within an implementation.IPFS
    CAS_URI_ALGORITHMAlgorithm for generating unique content-bound identifiers for the implementation-selected CAS protocol.IPFS CID
    COMPRESSION_ALGORITHMFile compression algorithm.GZIP
    REVEAL_VALUECryptographic hash of the commitment value.SHA256 Multihash (0x12)
    GENESIS_TIMEThe point in the target anchoring system’s transaction history at which Sidetree implementation is first activated (e.g. block number in a blockchain).630000
    MAX_CORE_INDEX_FILE_SIZEMaximum compressed Core Index File size.1 MB (zipped)
    MAX_PROVISIONAL_INDEX_FILE_SIZEMaximum compressed Provisional Index File size.1 MB (zipped)
    MAX_PROOF_FILE_SIZEMaximum compressed Proof File size.2.5 MB (zipped)
    MAX_CHUNK_FILE_SIZE Maximum compressed chunk file size.10 MB
    MAX_MEMORY_DECOMPRESSION_FACTORMaximum size after decompression.3x file size
    MAX_CAS_URI_LENGTHMaximum length of CAS URIs.100 bytes
    MAX_DELTA_SIZEMaximum canonicalized operation delta buffer size.1,000 bytes
    MAX_OPERATION_COUNTMaximum number of operations per batch.10,000 ops
    MAX_OPERATION_HASH_LENGTHMaximum length of all hashes in CAS URI files.100 bytes
    NONCE_SIZEThe number of bytes (octets) in nonce values.16 bytes
    +

    § Common Functions

    +

    The following is a list of functional procedures that are commonly used across the protocol. These functions are defined once here and referenced throughout the specification, wherever an implementer must invoke them to comply with normative processes.

    +

    § Hashing Process

    +

    All data hashed within the bounds of the protocol follow the same procedural steps, and yield a consistently encoded output. Given a data value, the following steps are used to generate a hashed output:

    +
      +
    1. Generate a hash of the data value using the HASH_PROTOCOL with the HASH_ALGORITHM.
    2. +
    3. Encode the resulting output using the DATA_ENCODING_SCHEME.
    4. +
    5. Return the encoded hashing output.
    6. +
    +

    Pseudo-code example using current protocol defaults:

    +
    let HashingOutput = Base64URL( Multihash(DATA, 0x12) );
    +
    +

    § Commitment Schemes

    +

    Commitment schemes are used by the Sidetree protocol in important ways to preserve the integrity of operations and assist in recovery.

    +

    § Public Key Commitment Scheme

    +

    The following steps define the commitment scheme for generating a public key commitment from a public key.

    +
      +
    1. Encode the public key into the form of a valid JWK.
    2. +
    3. Canonicalize the JWK encoded public key using the implementation’s JSON_CANONICALIZATION_SCHEME.
    4. +
    5. Use the implementation’s HASH_PROTOCOL to hash the canonicalized public key to generate the REVEAL_VALUE, then hash the resulting hash value again using the implementation’s HASH_PROTOCOL to produce the public key commitment.
    6. +
    +

    For maximum forward cryptographic security, implementers SHOULD NOT re-use public keys across different commitment invocations. +Implementers MUST NOT re-use public key JWK payloads across different commitment invocations.

    +

    § JWK Nonce

    +

    Implementers MAY define the nonce property in the public key JWK payload. +The nonce property enables the re-use of public keys across commitments without re-using the public key JWK payloads. +If the nonce property is defined by the implementer, the DID Owner MAY populate the nonce property in the public key JWK payload. +If the nonce property is populated, the value of the nonce property MUST be of size NONCE_SIZE and be encoded using Base64URL encoding.

    +

    § Network Topology

    +

    The figure below illustrates the three primary components of a Sidetree-based DID overlay network:

    +
      +
    1. The underlying anchoring system that serves as the global anchoring and linear sequencing system for DID operations.
    2. +
    3. The Sidetree nodes themselves, which interact with the anchoring system to anchor operations, fetch and replicate data from the CAS network, and process operations in accordance with the protocol deterministic ruleset.
    4. +
    5. An integrated Content-Addressable Storage (CAS) network layer Sidetree nodes use to distribute and replicate DID operation files.
    6. +
    + +

    § File Structures

    +

    The protocol defines the following three file structures, which house DID operation data and are designed to support key functionality to enable light node configurations, minimize permanently retained data, and ensure performant resolution of DIDs.

    + +

    § Core Index File

    +

    Core Index Files contain Create, Recover, and Deactivate operation values, as well as a CAS URI for the related Provisional Index File (detailed below). As the name suggests, Core Index Files are anchored to the target anchoring system via embedding a CAS URI in the anchoring system’s transactional history.

    +
    EXAMPLE
    {
    +  "coreProofFileUri": CAS_URI,
    +  "provisionalIndexFileUri": CAS_URI,
    +  "writerLockId": OPTIONAL_LOCKING_VALUE,
    +  "operations": {
    +    "create": [
    +      {
    +        "suffixData": {
    +          "type": TYPE_STRING,
    +          "deltaHash": DELTA_HASH,
    +          "recoveryCommitment": COMMITMENT_HASH
    +        }
    +      },
    +      {...}
    +    ],
    +    "recover": [
    +      {
    +        "didSuffix": SUFFIX_STRING,
    +        "revealValue": MULTIHASH_OF_JWK
    +      },
    +      {...}
    +    ],
    +    "deactivate": [
    +      {
    +        "didSuffix": SUFFIX_STRING,
    +        "revealValue": MULTIHASH_OF_JWK
    +      },
    +      {...}
    +    ]
    +  }
    +}
    +
    +
    +

    A valid Core Index File is a JSON document that MUST NOT exceed the MAX_CORE_INDEX_FILE_SIZE. Any unknown properties in this file not defined by this specification or specifically permitted by the implementer, MUST result in an invalidation of the entire file.

    +

    The Core Index File JSON document is composed as follows:

    +
      +
    1. The Core Index File MUST contain a provisionalIndexFileUri property if the batch of transactions being anchored contains any Create, Recovery, or Update operations, and its value MUST be a CAS URI for the related Provisional Index File. If the batch of transactions being anchored is only comprised of Deactivate operations, the provisionalIndexFileUri property MUST NOT be present.
    2. +
    3. The Core Index File MUST contain a coreProofFileUri property if the batch of transactions being anchored contains any Recovery or Deactivate operations, and its value MUST be a CAS URI for the related Core Proof File.
    4. +
    5. The Core Index File MAY contain a writerLockId if the implementation chooses to implement a mechanism that requires embedded anchoring information, and if present, its value MUST comply with the specifications of the implementation.
    6. +
    7. If the set of operations to be anchored contain any Create, Recover, or Deactivate operations, the Core Index File MUST contain an operations property, and its value MUST be an object, composed as follows: +
        +
      • If there are any Create operations to be included in the Core Index File: +
          +
        1. The operations object MUST include a create property, and its value MUST be an array.
        2. +
        3. For each Create operation to be included in the create array, herein referred to as Core Index File Create Entries, use the following process to compose and include a JSON object for each entry: + +
        4. +
        5. The Core Index File MUST NOT include multiple Create operations that produce the same DID Suffix.
        6. +
        +
      • +
      • If there are any Recovery operations to be included in the Core Index File: +
          +
        1. The operations object MUST include a recover property, and its value MUST be an array.
        2. +
        3. For each Recovery operation to be included in the recover array, herein referred to as Core Index File Recovery Entries, use the following process to compose and include entries: +
            +
          • The object MUST contain a didSuffix property, and its value MUST be the DID Suffix of the DID the operation pertains to. A Core Index File MUST NOT contain more than one operation of any type with the same DID Suffix.
          • +
          • The object MUST contain a revealValue property, and its value MUST be the REVEAL_VALUE of the last update commitment.
          • +
          +
        4. +
        +
      • +
      • If there are any Deactivate operations to be included in the Core Index File: +
          +
        1. The operations object MUST include a deactivate property, and its value MUST be an array.
        2. +
        3. For each Deactivate operation to be included in the deactivate array, use the following process to compose and include entries: +
            +
          • The object MUST contain a didSuffix property, and its value MUST be the DID Suffix of the DID the operation pertains to. A Core Index File MUST NOT contain more than one operation of any type with the same DID Suffix.
          • +
          • The object MUST contain a revealValue property, and its value MUST be the REVEAL_VALUE of the last update commitment.
          • +
          +
        4. +
        +
      • +
      +
    8. +
    +

    § Provisional Index File

    +

    Provisional Index Files contain Update operation proving data, as well as CAS URI links to Chunk Files.

    +
    EXAMPLE
    {
    +  "provisionalProofFileUri": CAS_URI,
    +  "chunks": [
    +    { "chunkFileUri": CAS_URI },
    +    {...}
    +  ],
    +  "operations": {
    +    "update": [
    +      {
    +        "didSuffix": SUFFIX_STRING,
    +        "revealValue": MULTIHASH_OF_JWK
    +      },
    +      {...}
    +    ]
    +  }
    +}
    +
    +
    +

    A valid Provisional Index File is a JSON document that MUST NOT exceed the MAX_PROVISIONAL_INDEX_FILE_SIZE. Any unknown properties in this file not defined by this specification or specifically permitted by the implementer, MUST result in an invalidation of the entire file.

    +

    The Provisional Index File JSON document is composed as follows:

    +
      +
    1. The Provisional Index File MUST contain a provisionalProofFileUri property if the batch of transactions being anchored contains any Update operations, and its value MUST be a CAS URI for the related Provisional Proof File.
    2. +
    3. The Provisional Index File MUST contain a chunks property, and its value MUST be an array of Chunk Entries for the related delta data for a given chunk of operations in the batch. Future versions of the protocol will specify a process for separating the operations in a batch into multiple Chunk Entries, but for this version of the protocol there MUST be only one Chunk Entry present in the array. Chunk Entry objects are composed as follows: +
        +
      1. The Chunk Entry object MUST contain a chunkFileUri property, and its value MUST be a URI representing the corresponding CAS file entry, generated via the CAS_URI_ALGORITHM.
      2. +
      +
    4. +
    5. If there are any operation entries to be included in the Provisional Index File (currently only Update operations), the Provisional Index File MUST include an operations property, and its value MUST be an object composed as follows: +
        +
      • If there are any Update entries to be included: +
          +
        1. The operations object MUST include an update property, and its value MUST be an array.
        2. +
        3. For each Update operation to be included in the update array, herein referred to as Provisional Index File Update Entries, use the following process to compose and include entries: +
            +
          • The object MUST contain a didSuffix property, and its value MUST be the DID Suffix of the DID the operation pertains to, with a maximum length as specified by the MAX_OPERATION_HASH_LENGTH.
          • +
          • The object MUST contain a revealValue property, and its value MUST be the REVEAL_VALUE of the last update commitment, with a maximum length as specified by the MAX_OPERATION_HASH_LENGTH.
          • +
          +
        4. +
        +
      • +
      +
    6. +
    +

    § Core Proof File

    +

    Core Proof Files are compressed JSON Documents containing the cryptographic proofs (signatures, hashes, etc.) that form the signature-chained backbone for the state lineages of all DIDs in the system. The cryptographic proofs present in Core Proof Files also link a given operation to its verbose state data, which resides in a related Chunk File.

    +
    EXAMPLE
    {
    +  "operations": {
    +    "recover": [
    +      {
    +        "signedData": {
    +          "protected": {...},
    +          "payload": {
    +            "recoveryCommitment": COMMITMENT_HASH,
    +            "recoveryKey": JWK_OBJECT,
    +            "deltaHash": DELTA_HASH
    +          },
    +          "signature": SIGNATURE_STRING
    +        }
    +      },
    +      {...}
    +    ],
    +    "deactivate": [
    +      {
    +        "signedData": {
    +          "protected": {...},
    +          "payload": {
    +            "didSuffix": SUFFIX_STRING,
    +            "recoveryKey": JWK_OBJECT
    +          },
    +          "signature": SIGNATURE_STRING
    +        }
    +      },
    +      {...}
    +    ]
    +  }
    +}
    +
    +
    +

    Any unknown properties in this file not defined by this specification or specifically permitted by the implementer, MUST result in an invalidation of the entire file.

    +

    In this version of the protocol, Core Proof Files are constructed as follows:

    +
      +
    1. The Core Proof File MUST include an operations property, and its value MUST be an object containing cryptographic proof entries for any Recovery and Deactivate operations to be included in a batch. Include the Proof Entries as follows: + +
    2. +
    +

    § Provisional Proof File

    +

    Provisional Proof Files are compressed JSON Documents containing the cryptographic proofs (signatures, hashes, etc.) for all the (eventually) prunable DID operations in the system. The cryptographic proofs present in Provisional Proof Files also link a given operation to its verbose state data, which resides in a related Chunk File.

    +
    EXAMPLE
    {
    +  "operations": {
    +    "update": [
    +      {
    +        "signedData": {
    +          "protected": {...},
    +          "payload": {
    +            "updateKey": JWK_OBJECT,
    +            "deltaHash": DELTA_HASH
    +          },
    +          "signature": SIGNATURE_STRING
    +        }
    +      },
    +      {...}
    +    ]
    +  }
    +}
    +
    +
    +

    Any unknown properties in this file not defined by this specification or specifically permitted by the implementer, MUST result in an invalidation of the entire file.

    +

    In this version of the protocol, Provisional Proof Files are constructed as follows:

    +
      +
    1. The Provisional Proof File MUST include an operations property, and its value MUST be an object containing cryptographic proof entries for any Update operations to be included in a batch. Include the Proof Entries as follows: + +
    2. +
    +

    § Chunk Files

    +

    Chunk Files are JSON Documents, compressed via the COMPRESSION_ALGORITHM, that contain Sidetree Operation source data, which are composed of delta-based CRDT entries that modify the state of a Sidetree identifier’s DID state.

    +

    For this version of the protocol, there will only exist a single Chunk File that contains all the state modifying data for all operations in the included set. Future versions of the protocol will separate the total set of included operations into multiple chunks, each with their own Chunk File.

    +
    EXAMPLE
    {
    +  "deltas": [
    +       
    +    {
    +      "patches": PATCH_ARRAY,
    +      "updateCommitment": COMMITMENT_HASH
    +    },
    +    ...
    +  ]
    +}
    +
    +
    +

    Any unknown properties in this file not defined by this specification or specifically permitted by the implementer, MUST result in an invalidation of the entire file.

    +

    In this version of the protocol, Chunk Files are constructed as follows:

    +
      +
    1. +

      The Chunk File MUST include a deltas property, and its value MUST be an array containing Chunk File Delta Entry objects.

      +
    2. +
    3. +

      Each Chunk File Delta Entry MUST be a JSON object serialized via the JSON_CANONICALIZATION_SCHEME, assembled as follows:

      +
        +
      1. The object MUST contain a patches property, and its value MUST be an array of DID State Patches.
      2. +
      3. The object MUST contain an updateCommitment property, and its value MUST be the next Update Commitment generated during the operation process associated with the type of operation being performed.
      4. +
      +
    4. +
    5. +

      Each Chunk File Delta Entry MUST be appended to the deltas array as follows, in this order:

      +
        +
      1. If any Create operations were present in the associated Core Index File, append all Create Operation Delta Objects in the same index order as their matching Core Index File Create Entry.
      2. +
      3. If any Recovery operations were present in the associated Core Index File, append all Recovery Operation Delta Objects in the same index order as their matching Core Index File Recovery Entry.
      4. +
      5. If any Update operations were present in the associated Provisional Index File, append all Update Operation Delta Objects in the same index order as their matching Provisional Index File Update Entry.
      6. +
      +
    6. +
    +

    § DID URI Composition

    +

    DID Methods based on the Sidetree protocol all share the same identifier format. The unique identifier segment of a Sidetree-based DID, known as the DID Suffix, is derived based on the initial state of the DID’s state data. The DID Suffix is cryptographically bound to the initial PKI state of the DID, which means Sidetree DIDs are self-certifying. As a result, a person or entity who creates a Sidetree-based DID knows their unique identifier at the moment of generation, and it is cryptographically secured for instant use (for more on the instant use capabilities of Sidetree DIDs, see Unpublished DID Resolution).

    +

    To generate the Short-Form DID URI of a Sidetree DID, use the Hashing Process to generate a hash of the canonicalized Create Operation Suffix Data Object. The following is an example of a resulting colon (:) separated DID URI composed of the URI scheme (did:), Method identifier (sidetree:), and unique identifier string (EiBJz4...):

    +

    Format of Short-form DID URI:

    +
    did:METHOD:<did-suffix>
    +
    +

    Example of Short-Form DID URI:

    +
    did:sidetree:EiDahaOGH-liLLdDtTxEAdc8i-cfCz-WUcQdRJheMVNn3A
    +
    +

    An implementer MAY define additional components in their method’s DID URI composition.

    +
    NOTE

    Many implementations have multiple active network instances of their DID Method (e.g. mainnet and testnet). How different network instances of a DID Method are represented in the DID URI string is method-specific. Many methods choose to use the base format above (did:METHOD) as their primary/mainnet network, and add an additional segment after the :METHOD segment to denote other network instances, for example: did:METHOD:testnet. DID Methods SHOULD clearly describe parsing rules for distinguishing between their different network instances.

    +
    +

    § Long-Form DID URIs

    +

    In many DID Methods, there is a period of time (which may be indefinite) +between the generation of a DID and the DID operation being anchored, +propagated, and processed in the underlying anchoring and storage +systems. In order to account for this, Sidetree introduces an equivalent +variant of Sidetree-based DIDs that is self-certifying and self-resolving, +known as the Long-Form DID URI. +The Long-Form DID URI variant of Sidetree-based DIDs +enables DIDs to be immediately resolvable after generation by including +the DID’s initial state data within the Long-Form DID URI +itself. Sidetree Long-Form DID URIs +are the Short-Form DID URI with an additional +colon-separated (:) segment appended to the end. The value of this final +URI segment is a canonicalized JSON data payload composed of the +Create Operation Suffix data and the +Create Operation Delta data, encoded +via the implementation’s DATA_ENCODING_SCHEME.

    +

    Long-form DID JSON data payload:

    +
    {
    +  "delta": {
    +    "patches": [
    +      {
    +        "action": "replace",
    +        "document": {
    +          "publicKeys": [
    +            {
    +              "id": "anySigningKeyId",
    +              "publicKeyJwk": {
    +                "crv": "secp256k1",
    +                "kty": "EC",
    +                "x": "H61vqAm_-TC3OrFSqPrEfSfg422NR8QHPqr0mLx64DM",
    +                "y": "s0WnWY87JriBjbyoY3FdUmifK7JJRLR65GtPthXeyuc"
    +              },
    +              "purposes": [
    +                "auth"
    +              ],
    +              "type": "EcdsaSecp256k1VerificationKey2019"
    +            }
    +          ],
    +          "services": [
    +            {
    +              "id": "anyServiceEndpointId",
    +              "type": "anyType",
    +              "serviceEndpoint": "http://any.endpoint"
    +            }
    +          ]
    +        }
    +      }
    +    ],
    +    "updateCommitment": "EiBMWE2JFaFipPdthcFiQek-SXTMi5IWIFXAN8hKFCyLJw"
    +  },
    +  "suffixData": {
    +    "deltaHash": "EiBP6gAOxx3YOL8PZPZG3medFgdqWSDayVX3u1W2f-IPEQ",
    +    "recoveryCommitment": "EiBg8oqvU0Zq_H5BoqmWf0IrhetQ91wXc5fDPpIjB9wW5w"
    +  }
    +}
    +
    +

    Format of Long-Form DID URI:

    +
    did:METHOD:<did-suffix>:<long-form-suffix-data>
    +
    +

    Example of Long-Form DID URI:

    +
    did:sidetree:EiDahaOGH-liLLdDtTxEAdc8i-cfCz-WUcQdRJheMVNn3A:eyJkZWx0YSI6eyJwYXRjaGVzIjpbeyJhY3Rpb24iOiJyZXBsYWNlIiwiZG9jdW1lbnQiOnsicHVibGljX2tleXMiOlt7ImlkIjoiYW55U2lnbmluZ0tleUlkIiwiandrIjp7ImNydiI6InNlY3AyNTZrMSIsImt0eSI6IkVDIiwieCI6Ikg2MXZxQW1fLVRDM09yRlNxUHJFZlNmZzQyMk5SOFFIUHFyMG1MeDY0RE0iLCJ5IjoiczBXbldZODdKcmlCamJ5b1kzRmRVbWlmSzdKSlJMUjY1R3RQdGhYZXl1YyJ9LCJwdXJwb3NlIjpbImF1dGgiXSwidHlwZSI6IkVjZHNhU2VjcDI1NmsxVmVyaWZpY2F0aW9uS2V5MjAxOSJ9XSwic2VydmljZV9lbmRwb2ludHMiOlt7ImVuZHBvaW50IjoiaHR0cDovL2FueS5lbmRwb2ludCIsImlkIjoiYW55U2VydmljZUVuZHBvaW50SWQiLCJ0eXBlIjoiYW55VHlwZSJ9XX19XSwidXBkYXRlX2NvbW1pdG1lbnQiOiJFaUJNV0UySkZhRmlwUGR0aGNGaVFlay1TWFRNaTVJV0lGWEFOOGhLRkN5TEp3In0sInN1ZmZpeF9kYXRhIjp7ImRlbHRhX2hhc2giOiJFaUJQNmdBT3h4M1lPTDhQWlBaRzNtZWRGZ2RxV1NEYXlWWDN1MVcyZi1JUEVRIiwicmVjb3ZlcnlfY29tbWl0bWVudCI6IkVpQmc4b3F2VTBacV9INUJvcW1XZjBJcmhldFE5MXdYYzVmRFBwSWpCOXdXNXcifX0
    +
    +

    The Long-Form DID URI variant of Sidetree-based DIDs supports the following features and usage patterns:

    +
      +
    • Resolving the DID Documents of unpublished DIDs.
    • +
    • Authenticating with unpublished DIDs.
    • +
    • Signing and verifying credentials signed against unpublished DIDs.
    • +
    • After publication and propagation are complete, authenticating with either the Short-Form DID URI or Long-Form DID URI.
    • +
    • After publication and propagation are complete, signing and verifying credentials signed against either the Short-Form DID URI or Long-Form DID URI.
    • +
    +

    § JSON Web Signatures

    +

    Sidetree relies on JSON Web Signatures for authentication and integrity protection of DID Operations, except for Create, which contains key material and is self-certifying.

    +

    § Signing

    +

    In addition to RFC7515, the following additional requirements MUST be observed by Sidetree Method implementers.

    +
      +
    1. kid MAY be present in the protected header.
    2. +
    3. alg MUST be present in the protected header, its value MUST NOT be none.
    4. +
    5. No additional members may be present in the protected header.
    6. +
    +

    Here is an example of a decoded JWS header:

    +
    {
    +  "kid": "did:example:123#_Qq0UL2Fq651Q0Fjd6TvnYE-faHiOpRlPVQcY_-tA4A",
    +  "alg": "EdDSA"
    +}
    +
    +
    WARNING

    It is recommended that kid be a DID URL. If it is not, method implementers might need to rely on additional context to uniquely identify the correct verificationMethod.

    +
    +

    § Verifying

    +

    Regardless of which verification relationship a verificationMethod is associated with, the process of verifying a JWS linked to a DID is the same.

    +

    The JWS header is parsed and a kid is extracted.

    +
      +
    1. Iterate the verificationMethods, until a verificationMethod with id equal to kid is found.
    2. +
    3. Convert the discovered verificationMethod to JWK if necessary.
    4. +
    5. Perform JWS Verification using the JWK.
    6. +
    +

    § Operation Verification

    +

    Sidetree operations are considered valid when the JWS can be verified with the correct key pair designated for the type of operation being invoked (i.e. update, recover, deactivate).

    +

    An Update Operation MUST be signed by the currently valid Update Key Pair.

    +

    A Recover Operation MUST be signed by the currently valid Recovery Key Pair.

    +

    A Deactivate Operation MUST be signed by the currently valid Recovery Key Pair.

    +
    WARNING

    Signatures on operations may be valid, but operations may be deemed invalid for other reasons (e.g. malformed delta payload or being stale).

    +
    +
    WARNING

    It is not recommended to reuse verificationMethods for multiple verification relationships.

    +
    +

    § Operation Anchoring Time Ranges

    +

    A Sidetree-based DID Method MAY define the anchorFrom and/or anchorUntil properties as part of the operation’s data object payload. +If anchorFrom is defined by the implementer, a DID owner MAY include the earliest allowed anchoring time for their operation in the anchorFrom property of the operation’s data object payload. +The anchorFrom property is conceptually similar to the RFC7519 nbf and iat claims. +If anchorUntil is defined by the implementer, a DID owner MAY include the latest allowed anchoring time for their operation in the anchorUntil property of the operation’s data object payload. +The anchorUntil property is conceptually similar to the RFC7519 exp claim. +These properties contain numeric values; but note that anchoring systems may have differing mechanisms of time (as defined by the method).

    +

    A Sidetree-based DID Method MAY require validation for rejecting stale operations. +An operation is considered stale relative to the timing information provided by the underlying anchoring system. +When an operation is stale according to the DID method’s parameters, the operation is deemed as invalid. +During processing, if the DID method validates stale operations, the DID owner’s operation time range is compared to the anchoring system’s timing information. +Operations that are anchored prior to anchorFrom are deemed invalid, if anchorFrom is set. +Operations that are anchored after anchorUntil are deemed invalid, if anchorUntil is set (or implicitly defined). +If the operation is deemed invalid, skip the entry and iterate forward to the next entry.

    +

    A Sidetree-based DID Method MAY constrain the range between anchorFrom and anchorUntil using a delta defined by the implementation. +The implementer MAY also implicitly define the anchorUntil using the anchorFrom plus a delta defined by the implementation. +The delta MAY be defined as the MAX_OPERATION_TIME_DELTA protocol parameter.

    +

    § DID Operations

    +

    Sidetree-based DIDs support a variety of DID operations, all of which require the DID owner to generate specific data values and cryptographic material. The sections below describe how to perform each type of operation, and how those operations are represented in the CAS-replicated files that are anchored to the underlying anchoring system.

    +

    While virtually all DID owners will engage User Agent applications on their local devices to perform these operations, most will not generate the anchoring transactions on the underlying anchoring system. Instead, most users will likely send the anchoring-related operation values they generate to external nodes for anchoring. This is relatively safe, because operations require signatures that an external node cannot forge. The only attack available to a rogue node operator is to not anchor the operations a DID owner sends them. However, the DID owner can detect this (via a scan of subsequent blocks) and send their operation to a different node or do it themselves, if they so desire.

    +

    It is strongly advised that DID owners and User Agents (e.g. wallet apps) retain their DID operations and operation-anchoring files. Doing so is helpful in cases where users, or their User Agent, need to quickly access the operations and operation-anchoring files, or a user wishes to individually persist their operation and operation-anchoring files on the CAS network for even greater independent availability assurance.

    +
    NOTE

    This specification does not define an API for sending public DID operation values to third-party Sidetree nodes for external anchoring, as that is an elective activity that has no bearing on the technical workings of the protocol, its capabilities, or its security guarantees.

    +
    +
    WARNING

    Operations other than Create contain a compact JWS. Dereferencing of key material used to verify the JWS is a DID Method specific concern. Some methods may rely on the DID Document data model, others may rely on an internal data model. Some methods may rely on kid of the form did:example:123#fingerprint, others may not include a kid in the JWS, or its value may be arbitrary. Support for specific alg fields is also DID Method specific. Implementers are cautioned to choose support for specific alg values carefully.

    +
    +

    § Create

    +

    Use the following process to generate a Sidetree-based DID:

    +
      +
    1. Generate a key pair using the defined KEY_ALGORITHM, let this be known as the Update Key Pair.
    2. +
    3. Generate a public key commitment using the defined public key commitment scheme and public key of the generated Update Key Pair, let this resulting commitment be known as the update commitment.
    4. +
    5. Generate a canonicalized representation of the following object using the implementation’s JSON_CANONICALIZATION_SCHEME, herein referred to as the Create Operation Delta Object:
      {
      +  "patches": [ PATCH_1, PATCH_2, ... ],
      +  "updateCommitment": COMMITMENT_HASH
      +}
      +
      +
        +
      • The object MUST contain a patches property, and its value MUST be a JSON array of DID State Patches.
      • +
      • The object MUST contain an updateCommitment property, and its value MUST be the update commitment as generated in step 2.
      • +
      +
    6. +
    7. Generate a key pair using the defined KEY_ALGORITHM, let this be known as the recovery key pair, where the public key of this pair is used for generating the recovery commitment, and the private key for use in the next recovery operation.
    8. +
    9. Generate a public key commitment using the defined public key commitment scheme and public key of the generated recovery key pair, let this resulting commitment be known as the recovery commitment.
    10. +
    11. Generate a canonicalized representation of the following object using the implementation’s JSON_CANONICALIZATION_SCHEME, herein referred to as the Create Operation Suffix Data Object:
      {
      +  "type": TYPE_STRING,
      +  "deltaHash": DELTA_HASH,
      +  "recoveryCommitment": COMMITMENT_HASH,
      +  "anchorOrigin": ANCHOR_ORIGIN
      +}
      +
      +
        +
      • The object MAY contain a type property, and if present, its value MUST be a type string, of a length and composition defined by the implementation, that signifies the type of entity a DID represents.
      • +
      • The object MUST contain a deltaHash property, and its value MUST be a hash of the canonicalized Create Operation Delta Object (detailed above), generated via the HASH_PROTOCOL.
      • +
      • The object MUST contain a recoveryCommitment property, and its value MUST be the recovery commitment as generated in step 5.
      • +
      • The object MAY contain an anchorOrigin property if an implementation defines this property. This property signifies the implementer-defined system(s) that know the most recent anchor for this DID. The property’s type and composition is defined by the implementation. Implementers MAY define this property since implementers with a single common anchoring system do not need to support this property.
      • +
      +
    12. +
    +
    NOTE

    Implementations MAY choose to define additional properties for inclusion in the Create Operation Suffix Data Object, but the presence of any properties beyond the standard properties or implementation-defined properties IS NOT permitted.

    +
    +
    WARNING

    The string values used in the type field must be carefully considered, and this specification strongly cautions implementers to avoid allowing any values that represent humans, groups of humans, or any human-identifying classifications.

    +
    +

    § Update

    +

    The following process must be used to update the state of a Sidetree-based DID:

    +
      +
    1. Retrieve the Update Reveal Value that matches the previously anchored Update Commitment.
    2. +
    3. Generate a canonicalized representation of the following object using the implementation’s JSON_CANONICALIZATION_SCHEME, herein referred to as the Update Operation Delta Object, composed as follows:
      {
      +  "patches": [ PATCH_1, PATCH_2, ... ],
      +  "updateCommitment": COMMITMENT_HASH
      +}
      +
      +
        +
      • The object MUST contain a patches property, and its value MUST be an array of DID State Patches.
      • +
      • The object MUST contain an updateCommitment property, and its value MUST be a new Update Commitment, the value of which will be revealed for the next Update operation.
      • +
      +
    4. +
    5. Generate an IETF RFC 7515 compliant compact JWS representation of the following object, herein referred to as the Update Operation Signed Data Object, with a signature that validates against a currently active update key, and contains the following payload values:
      {
      +  "protected": {...},
      +  "payload": {
      +    "updateKey": JWK_OBJECT,
      +    "deltaHash": DELTA_HASH
      +  },
      +  "signature": SIGNATURE_STRING
      +}
      +
      +
        +
      • The JWS payload object MUST include an updateKey property, and its value MUST be the IETF RFC 7517 compliant JWK representation matching the previous Update Commitment.
      • +
      • The JWS payload object MUST contain a deltaHash property, and its value MUST be a hash of the canonicalized Update Operation Delta Object, generated via the HASH_PROTOCOL, with a maximum length as specified by the MAX_OPERATION_HASH_LENGTH.
      • +
      +
    6. +
    +

    § Recover

    +

    Use the following process to recover a Sidetree-based DID:

    +
      +
    1. Retrieve the Recovery Key that matches the previously anchored Recovery Commitment. This value will be used in constructing a Core Index File Recovery Entry for the DID being recovered.
    2. +
    3. Generate a new recovery key pair, which MUST NOT be the same key used in any previous operations, via the KEY_ALGORITHM, retaining the Next Recovery Public Key for use in generating the next Recovery Commitment, and the private key for use in the next Recovery operation.
    4. +
    5. Create a Recovery Commitment using the Hashing Process to generate a hash value from the canonicalized IETF RFC 7517 JWK representation (using the implementation’s JSON_CANONICALIZATION_SCHEME) of the Next Recovery Public Key, and retain the hash value for inclusion in a Core Index File.
    6. +
    7. Generate a new Update Key Pair, which SHOULD NOT be the same key used in any previous operations, via the KEY_ALGORITHM, retaining the Next Update Public Key for use in generating the next Update Commitment, and the private key for use in the next Update operation.
    8. +
    9. Create an Update Commitment using the Hashing Process to generate a hash value from the canonicalized IETF RFC 7517 JWK representation (using the implementation’s JSON_CANONICALIZATION_SCHEME) of the Next Update Public Key, and retain the hash value for inclusion in the Recovery Operation Delta Object (as described below).
    10. +
    11. Generate and retain a COMMITMENT_VALUE, in adherence with the Commitment Schemes directives, for use in the next Update operation, herein referred to as the Update Reveal Value.
    12. +
    13. Generate an Update Commitment using the Hashing Process, in adherence with the Commitment Schemes directives, to generate a hash of the Update Reveal Value, and retain the resulting hash value for inclusion in a Core Index File.
    14. +
    15. Generate a canonicalized representation of the following object using the implementation’s JSON_CANONICALIZATION_SCHEME, herein referred to as the Recovery Operation Delta Object, composed as follows:
      {
      +  "patches": [ PATCH_1, PATCH_2, ... ],
      +  "updateCommitment": COMMITMENT_HASH
      +}
      +
      +
        +
      • The object MUST contain a patches property, and its value MUST be an array of DID State Patches.
      • +
      • The object MUST contain a updateCommitment property, and its value MUST be the Update Commitment, as described above.
      • +
      +
    16. +
    17. Generate an IETF RFC 7515 compliant compact JWS representation of the following object, herein referred to as the Recovery Operation Signed Data Object, with a signature that validates against a currently active recovery key, and contains the following payload values:
      {
      +  "protected": {...},
      +  "payload": {
      +    "recoveryCommitment": COMMITMENT_HASH,
      +    "recoveryKey": JWK_OBJECT,
      +    "deltaHash": DELTA_HASH,
      +    "anchorOrigin": ANCHOR_ORIGIN
      +  },
      +  "signature": SIGNATURE_STRING
      +}
      +
      +
        +
      • The JWS payload object MUST contain a recoveryCommitment property, and its value MUST be the next Recovery Commitment, as described above, with a maximum length as specified by the MAX_OPERATION_HASH_LENGTH.
      • +
      • The JWS payload object MUST include a recoveryKey property, and its value MUST be the IETF RFC 7517 JWK representation matching the previous Recovery Commitment.
      • +
      • The JWS payload object MUST contain a deltaHash property, and its value MUST be a hash of the canonicalized Recovery Operation Delta Object, generated via the HASH_PROTOCOL, with a maximum length as specified by the MAX_OPERATION_HASH_LENGTH.
      • +
      • The JWS payload object MAY contain an anchorOrigin property if an implementation defines this property. This property signifies the implementer-defined system(s) that know the most recent anchor for this DID. The property’s type and composition is defined by the implementation. Implementers MAY define this property since implementers with a single common anchoring system do not need to support this property.
      • +
      +
    18. +
    +

    § Deactivate

    +

    The following process must be used to deactivate a Sidetree-based DID:

    +
      +
    1. Retrieve the Recovery Reveal Value that matches the previously anchored Recovery Commitment.
    2. +
    3. Generate an IETF RFC 7515 compliant compact JWS object, herein referred to as the Deactivate Operation Signed Data Object, with a signature that validates against the currently active recovery key, and contains the following payload values:
      {
      +  "protected": {...},
      +  "payload": {
      +    "didSuffix": SUFFIX_STRING,
      +    "recoveryKey": JWK_OBJECT
      +  },
      +  "signature": SIGNATURE_STRING
      +}
      +
      +
        +
      • The JWS payload object MUST contain a didSuffix property, and its value MUST be the DID Suffix of the DID the operation pertains to, with a maximum length as specified by the MAX_OPERATION_HASH_LENGTH.
      • +
      • The JWS payload object MUST include a recoveryKey property, and its value MUST be the IETF RFC 7517 JWK representation matching the previous Recovery Commitment.
      • +
      +
    4. +
    +

    § DID State Patches

    +

    Sidetree defines a delta-based Conflict-Free Replicated Data Type system, wherein the metadata in a Sidetree-based implementation is controlled by the cryptographic PKI material of individual entities in the system, represented by DIDs. While the most common form of state associated with the DIDs in a Sidetree-based implementation is a DID Document, Sidetree can be used to maintain any type of DID-associated state.

    +

    Sidetree specifies a general format for patching the state associated with a DID, called Patch Actions, which define how to deterministically mutate a DID’s associated state. Sidetree further specifies a standard set of Patch Actions (below) implementers MAY use to facilitate DID state patching within their implementations. Support of the standard set of Patch Actions defined herein IS NOT required, but implementers MUST use the Patch Action format for defining patch mechanisms within their implementation. The general Patch Action format is defined as follows:

    +
    {
    +  "action": "add-public-keys",
    +  ...
    +}
    +
    +{
    +  "action": "-custom-action",
    +  ...
    +}
    +
    +
      +
    1. Patch Actions MUST be represented as JSON objects.
    2. +
    3. Patch Action objects MUST include an action property, and its value SHOULD be one of the standard Patch Action types listed in below, or, if the implementer chooses to create a custom Patch Action, a kebab-case string (dash-delimited lowercase words) with a leading dash, to indicate a custom Patch Action, for example: -custom-action. +
        +
      • add-public-keys
      • +
      • remove-public-keys
      • +
      • add-services
      • +
      • remove-services
      • +
      • ietf-json-patch
      • +
      +
    4. +
    +

    § Standard Patch Actions

    +

    The following set of standard Patch Actions are specified to help align on a common set of Patch Actions that provide a predictable usage pattern across Sidetree-based DID Method implementations.

    +

    § add-public-keys

    +

    The add-public-keys Patch Action describes the addition of cryptographic keys associated with a given DID. For any part of an add-public-keys Patch Action to be applied to the DID’s state, all specified conditions MUST be met for all properties and values, else the patch MUST be discarded in its entirety. In the case a public key entry already exists for the given id specified within an add-public-keys Patch Action, the implementation MUST overwrite the existing entry entirely with the incoming patch. To construct an add-public-keys patch, compose an object as follows:

    +
      +
    1. The object MUST include an action property, and its value MUST be add-public-keys.
    2. +
    3. The object MUST include a publicKeys property, and its value MUST be an array.
    4. +
    5. Each key being added MUST be represented by an entry in the publicKeys array, and each entry must be an object composed as follows: +
        +
      1. The object MUST include an id property, and its value MUST be a string with no more than fifty (50) Base64URL encoded characters. If the value is not of the correct type or exceeds the specified maximum length, the entire Patch Action MUST be discarded, without any of the patch being used to modify the DID’s state.
      2. +
      3. The object MUST include a type property, and its value MUST be a string and SHOULD be of a registered Cryptographic Suite.
      4. +
      5. The object MAY include a controller property, and its value MUST be a DID URI string. Implementations MAY specify a maximum length for the value, and if specified, the value MUST NOT exceed it. If the controller property is absent, the implementation must set the corresponding property in the resolved DID Document with a value that equates to the DID Document controller’s id. If the value is not of the correct type or exceeds the specified maximum length, the entire Patch Action MUST be discarded, without any of the patch being used to modify the DID’s state.
      6. +
      7. The object MUST include either a publicKeyJwk or a publicKeyMultibase property with values as defined by DID Core and DID Specification Registries. Implementers MAY choose to only define publicKeyJwk. These key representations are described in the JWK and Multibase subsections. Implementations MAY specify a maximum length for these values, and if specified, the values MUST NOT exceed it. If more or less than one of these properties is present, the value of the included property is not of the correct type, or the value exceeds the implementer’s specified maximum length, the entire Patch Action MUST be discarded, without any of the patch being used to modify the DID’s state.
      8. +
      9. The object MAY include a purposes property, and if included, its value MUST be an array of one or more strings. The value for each string SHOULD represent a verification relationship defined by DID Core or the DID Specification Registries. If the value is not of the correct type or contains any string not listed below (or defined by the implementer), the entire Patch Action MUST be discarded, without any of it being used to modify the DID’s state.
      10. +
      +
        +
      • authentication: a reference to the key’s id MUST be included in the authentication array of the resolved DID Document.
      • +
      • keyAgreement: a reference to the key’s id MUST be included in the keyAgreement array of the resolved DID Document.
      • +
      • assertionMethod: a reference to the key’s id MUST be included in the assertionMethod array of the resolved DID Document.
      • +
      • capabilityDelegation: a reference to the key’s id MUST be included in the capabilityDelegation array of the resolved DID Document.
      • +
      • capabilityInvocation: a reference to the key’s id MUST be included in the capabilityInvocation array of the resolved DID Document.
      • +
      +
    6. +
    +
    NOTE

    An implementer may support transformations from publicKeyJwk or publicKeyMultibase to other representations required by a particular Cryptographic Suite. +For example, an implementer may support projecting publicKeyBase58 into the resolution result for the Ed25519VerificationKey2018 suite.

    +
    +
    § JWK
    +
    EXAMPLE
    {
    +  "action": "add-public-keys",
    +  "publicKeys": [
    +    {
    +      "id": "key1",
    +      "purposes": ["authentication"],
    +      "type": "EcdsaSecp256k1VerificationKey2019",
    +      "publicKeyJwk": {...}
    +    }
    +  ]
    +}
    +
    +
    +

    When the object contains a publicKeyJwk, the public key patch is using a JWK representation. The value of publicKeyJwk MUST be a public key expressed as an IETF RFC 7517 compliant JWK representation for a KEY_ALGORITHM supported by the implementation. The key represented by the JWK object MUST be projected into the verificationMethod array of the DID Document upon resolution. If the value is not a compliant JWK representation, the entire Patch Action MUST be discarded, without any of it being used to modify the DID’s state.

    +
    § Multibase
    +
    EXAMPLE
    {
    +  "action": "add-public-keys",
    +  "publicKeys": [
    +    {
    +      "id": "key1",
    +      "purposes": ["authentication"],
    +      "type": "Ed25519VerificationKey2020",
    +      "publicKeyMultibase": "zgo4sNiXwJTbeJDWZLXVn9uTnRwgFHFxcgDePvEC9TiTYgRpG7q1p5s7yRAic"
    +    }
    +  ]
    +}
    +
    +
    +

    An implementer MAY define support for publicKeyMultibase in addition to supporting publicKeyJwk.

    +

    When the object contains a publicKeyMultibase, the public key patch is using a multibase representation. The key represented by the multibase encoding MUST be projected into the verificationMethod array of the DID Document upon resolution.

    +

    § remove-public-keys

    +
    EXAMPLE
    {
    +  "action": "remove-public-keys",
    +  "ids": ["key1", "key2"]
    +}
    +
    +
    +

    The remove-public-keys Patch Action describes the removal of cryptographic keys associated with a given DID. For any part of a remove-public-keys Patch Action to be applied to the DID’s state, all specified conditions MUST be met for all properties and values, else the patch MUST be discarded in its entirety. In the case there exists no public key entry for an id specified within a remove-public-keys Patch Action, the implementation SHALL perform no action and treat application of the delete operation as a success. To construct a remove-public-keys Patch Action, compose an object as follows:

    +
      +
    1. The object MUST include an action property, and its value MUST be remove-public-keys.
    2. +
    3. The object MUST include an ids property, and its value MUST be an array of key IDs that correspond with keys presently associated with the DID that are to be removed. If the value is not of the correct type or includes a string value that is not associated with a key in the document, the entire Patch Action MUST be discarded, without any of it being used to modify the DID’s state.
    4. +
    +

    § add-services

    +
    EXAMPLE
    {
    +  "action": "add-services",
    +  "services": [
    +    {
    +      "id": "sds",
    +      "type": "SecureDataStore",
    +      "serviceEndpoint": "http://hub.my-personal-server.com"
    +    },
    +    {
    +      "id": "did-config",
    +      "type": "LinkedDomains",
    +      "serviceEndpoint": {
    +        "origins": ["https://foo.com", "https://bar.com"]
    +      }
    +    }
    +  ]
    +}
    +
    +
    +

    The add-services Patch Action describes the addition of Service Endpoints to a DID’s state. For any part of an add-services Patch Action to be applied to the DID’s state, all specified conditions MUST be met for all properties and values, else the patch MUST be discarded in its entirety. In the case a service entry already exists for the given id specified within an add-services Patch Action, the implementation MUST overwrite the existing entry entirely with the incoming patch. To construct an add-services patch, compose an object as follows:

    +
      +
    1. The object MUST include an action property, and its value MUST be add-services.
    2. +
    3. The object MUST include a services property, and its value MUST be an array. If the value is not of the correct type, the entire Patch Action MUST be discarded, without any of it being used to modify the DID’s state.
    4. +
    5. Each service being added MUST be represented by an entry in the services array, and each entry must be an object composed as follows: +
        +
      1. The object MUST include an id property, and its value MUST be a string with a length of no more than fifty (50) Base64URL encoded characters. If the value is not of the correct type or exceeds the specified length, the entire Patch Action MUST be discarded, without any of it being used to modify the DID’s state.
      2. +
      3. The object MUST include a type property, and its value MUST be a string with a length of no more than thirty (30) Base64URL encoded characters. If the value is not a string or exceeds the specified length, the entire Patch Action MUST be discarded, without any of it being used to modify the DID’s state.
      4. +
      5. The object MUST include a serviceEndpoint property, and its value MUST be either a valid URI string (including a scheme segment: i.e. http://, git://) or a JSON object with properties that describe the Service Endpoint further. If the values do not adhere to these constraints, the entire Patch Action MUST be discarded, without any of it being used to modify the DID’s state.
      6. +
      +
    6. +
    +

    § remove-services

    +
    EXAMPLE
    {
    +  "action": "remove-services",
    +  "ids": ["sds1", "sds2"]
    +}
    +
    +
    +

    The remove-services Patch Action describes the removal of Service Endpoints associated with a given DID. For any part of a remove-services Patch Action to be applied to the DID’s state, all specified conditions MUST be met for all properties and values, else the patch MUST be discarded in its entirety. In the case there exists no service entry for an id specified within a remove-services Patch Action, the implementation SHALL perform no action and treat application of the delete operation as a success. To construct a remove-services Patch Action, compose an object as follows:

    +
      +
    1. The object MUST include an action property, and its value MUST be remove-services.
    2. +
    3. The object MUST include an ids property, and its value MUST be an array of Service Endpoint IDs that correspond with Service Endpoints presently associated with the DID that are to be removed.
    4. +
    +

    § replace

    +
    EXAMPLE
    {
    +  "action": "replace",
    +  "document": {
    +    "publicKeys": [
    +      {
    +        "id": "key2",
    +        "purposes": ["authentication"],
    +        "type": "EcdsaSecp256k1VerificationKey2019",
    +        "publicKeyJwk": {...}
    +      }
    +    ],
    +    "services": [
    +      {
    +        "id": "sds3",
    +        "type": "SecureDataStore",
    +        "serviceEndpoint": "http://hub.my-personal-server.com"
    +      }
    +    ]
    +  }
    +}
    +
    +
    +

    The replace Patch Action acts as a total state reset that replaces a DID’s current PKI metadata state with the state provided. The replace Patch Action enables the declaration of public keys and service endpoints using the same schema formats as the add-public-keys and add-services Patch Actions. To construct a replace patch, compose an object as follows:

    +
      +
    1. The object MUST include an action property, and its value MUST be replace.
    2. +
    3. The object MUST include a document property, and its value MUST be an object, which may contain the following properties: +
        +
      • The object MAY include a publicKeys property, and if present, its value MUST be an array of public key entries that follow the same schema and requirements as the public key entries from the add-public-keys Patch Action
      • +
      • The object MAY include a services property, and if present, its value MUST be an array of service endpoint entries that follow the same schema and requirements as the service endpoint entries from the add-services Patch Action.
      • +
      +
    4. +
    +

    § ietf-json-patch

    +

    The ietf-json-patch Patch Action describes a mechanism for modifying a DID’s state using IETF JSON Patch. To construct a ietf-json-patch Patch Action, compose an object as follows:

    +
      +
    1. The object MUST include an action property, and its value MUST be ietf-json-patch.
    2. +
    3. The object MUST include a patches property, and its value MUST be an array of IETF JSON Patch operation objects.
    4. +
    +

    If ietf-json-patch is used to add or remove from a proof purpose collection, such as operations, recovery or assertionMethod, per the DID Core spec, each collection element MUST have a unique id property, or be a unique string identifier.

    +

    See Operation Verification for more details on how operations are verified.

    +
    EXAMPLE
    {
    +  "action": "ietf-json-patch",
    +  "patches": [
    +    { "op": "add", ... },
    +    { "op": "remove", ... },
    +    { "op": "replace", ... },
    +    { "op": "move", ... },
    +    { "op": "copy", ... }
    +  ]
    +}
    +
    +
    +
    EXAMPLE
    {
    +  "action": "ietf-json-patch",
    +  "patches": [
    +    {
    +      "op": "replace",
    +      "path": "/service",
    +      "value": [
    +          {
    +              "id": "did:example:123#edv",
    +              "type": "EncryptedDataVault",
    +              "serviceEndpoint": "https://edv.example.com/",
    +          }
    +      ]
    +    }
    +  ]
    +}
    +
    +
    +
    WARNING

    Without careful validation, use of ietf-json-patch may result in unrecoverable states, similar to “Deactivated”.

    +
    +
    WARNING

    Use of ietf-json-patch may harm an implementation’s ability to perform validation on operations at ingestion time, which could impact performance negatively.

    +
    +

    § add-also-known-as

    +
    EXAMPLE
    {
    +  "action": "add-also-known-as",
    +  "uris": [
    +    "did:example:1234"
    +  ]
    +}
    +
    +
    +

    The add-also-known-as Patch Action describes the addition of Also Known As to a DID’s state. For any part of an add-also-known-as Patch Action to be applied to the DID’s state, all specified conditions MUST be met for all properties and values, else the patch MUST be discarded in its entirety. To construct an add-also-known-as patch, compose an object as follows:

    +
      +
    1. The object MUST include an action property, and its value MUST be add-also-known-as.
    2. +
    3. The object MUST include a uris property, and its value MUST be an array. Each value of the array MUST be a URI. If the value is not of the correct type, the entire Patch Action MUST be discarded, without any of it being used to modify the DID’s state.
    4. +
    +

    § remove-also-known-as

    +
    EXAMPLE
    {
    +  "action": "remove-also-known-as",
    +  "uris": [
    +    "did:example:1234"
    +  ]
    +}
    +
    +
    +

    The remove-also-known-as Patch Action describes the removal of Also Known As from a DID’s state. For any part of a remove-also-known-as Patch Action to be applied to the DID’s state, all specified conditions MUST be met for all properties and values, else the patch MUST be discarded in its entirety. To construct a remove-also-known-as Patch Action, compose an object as follows:

    +
      +
    1. The object MUST include an action property, and its value MUST be remove-also-known-as.
    2. +
    3. The object MUST include a uris property, and its value MUST be an array of URIs that correspond with Also Known As URIs presently associated with the DID that are to be removed.
    4. +
    +

    § Transaction & Operation Processing

    +

    § Transaction Anchoring

    +

    Once a Core Index File, Provisional Index File, and associated Chunk Files have been assembled for a given set of operations, a reference to the Core Index File must be embedded within the target anchoring system to enter the set of operations into the Sidetree implementation’s global state. Use the following process:

    +
      +
    1. Generate a transaction for the underlying anchoring system
    2. +
    3. Generate and include the following value, herein referred to as the Anchor String, within the transaction: +
        +
      1. Generate a numerical string ('732') that represents the total number of operations present in the Core Index File and Provisional Index File, herein referred to as the Operation Count.
      2. +
      3. Using the CAS_URI_ALGORITHM, generate a CID for the Core Index File, herein referred to as the Core Index File CAS URI.
      4. +
      5. Join the Operation Count and Core Index File CAS URI with a . as follows:
        "10000" + "." + "QmWd5PH6vyRH5kMdzZRPBnf952dbR4av3Bd7B2wBqMaAcf"
        +
        +
      6. +
      7. Embed the Anchor String in the transaction such that it can be located and parsed by any party that traverses the history of the target anchoring system.
      8. +
      +
    4. +
    5. If the implementation implements a per-op fee, ensure the transaction includes the fee amount required for the number of operations being anchored.
    6. +
    7. Encode the transaction with any other data or values required for inclusion by the target anchoring system, and broadcast it.
    8. +
    +

    § CAS File Propagation

    +

    To ensure other nodes of the implementation can retrieve the operation files required to ingest the included operations and update the states of the DIDs it contains, the implementer must ensure that the files associated with a given set of operations being anchored are available to peers seeking to request and replicate them across the CAS storage layer. Use the following procedure for propagating transaction-anchored CAS files:

    +
      +
    1. If the underlying anchoring system is subject to an anchoring inclusion delay (e.g. the interval between blocks in a blockchain), implementers SHOULD wait until they receive a confirmation of inclusion (whatever that means for the target anchoring system) before exposing/propagating the operation files across the CAS network. (more about the reason for this in the note below)
    2. +
    3. After confirmation is received, implementers SHOULD use the most effective means of proactive propagation that the CAS_PROTOCOL supports.
    4. +
    5. A Sidetree-based implementation node that anchors operations should not assume other nodes on the CAS network will indefinitely retain and propagate the files for a given set of operations they anchor. A node SHOULD retain and propagate any files related to the operations it anchors.
    6. +
    +
    NOTE

    Most anchoring systems feature some delay between the broadcast of a transaction and the recorded inclusion of the transaction in the anchoring system’s history. Because operation data included in the CAS files contains revealed commitment values for operations, propagating those files before confirmation of transaction inclusion exposes revealed commitment values to external entities who may download them prior to inclusion in the anchoring system. This means an attacker who learns of the revealed commitment value can craft invalid transactions that could be included before the legitimate operation the user is attempting to anchor. While this has no effect on proof-of-control security for a DID, an observing node would have to check the signatures of fraudulent transactions before the legitimate transaction is found, which could result in slower resolution processing for the target DID.

    +
    +

    § Transaction Processing

    +

    Regardless of the anchoring system an implementer chooses, the implementer MUST be able to sequence Sidetree-specific transactions within it in a deterministic order, such that any observer can derive the same order if the same logic is applied. The implementer MUST, either at the native transaction level or by some means of logical evaluation, assign Sidetree-specific transactions a Transaction Number. Transaction Numbers MUST be assigned to all Sidetree-specific transactions present in the underlying anchoring system after GENESIS_TIME, regardless of whether or not they are valid.

    +
      +
    1. An implementer MUST develop implementation-specific logic that enables deterministic ordering and iteration of all protocol-related transactions in the underlying anchoring system, such that all operators of the implementation process them in the same order.
    2. +
    3. Starting at GENESIS_TIME, begin iterating transactions using the implementation-specific logic.
    4. +
    5. For each transaction found during iteration that is determined to be a protocol-related transaction, process the transaction as follows: +
        +
      1. Assign the transaction a Transaction Number.
      2. +
      3. If the implementation supports enforcement value locking, and the transaction is encoded in accordance with the implementation’s value locking format, skip the remaining steps and process the transaction as described in the Proof of Fee section on Value Locking.
      4. +
      5. The Anchor String MUST be formatted correctly - if it is not, discard the transaction and continue iteration.
      6. +
      7. If the implementation DOES NOT support enforcement of a per-operation fee, skip this step. If enforcement of a per-operation fee is supported, ensure the transaction fee meets the per-operation fee requirements for inclusion - if it DOES NOT, discard the transaction and continue iteration.
      8. +
      9. If the implementation DOES NOT support enforcement of Value Locking, skip this step. If enforcement of Value Locking is supported, ensure the transaction’s fee meets the Value Locking requirements for inclusion - if it does not, discard the transaction and continue iteration.
      10. +
      11. Parse the Anchor String to derive the Operation Count and Core Index File CAS URI.
      12. +
      13. Use the CAS_PROTOCOL to fetch the Core Index File using the Core Index File CAS URI. If the file cannot be located, retain a reference that signifies the need to retry fetch of the file. If the file is successfully retrieved, proceed to the next section on how to process a Core Index File.
      14. +
      +
    6. +
    +

    § Core Index File Processing

    +

    This sequence of rules and processing steps must be followed to correctly process a Core Index File:

    +
      +
    1. The Core Index File MUST NOT exceed the MAX_CORE_INDEX_FILE_SIZE - if it does, cease processing, discard the file data, and retain a reference that the file is to be ignored.
    2. +
    3. Decompress the Core Index File in accordance with the implementation’s COMPRESSION_ALGORITHM, within the memory allocation limit specified for decompression in accordance with the implementation-defined MAX_MEMORY_DECOMPRESSION_FACTOR.
    4. +
    5. The Core Index File MUST validate against the protocol-defined Core Index File schema and construction rules - if it DOES NOT, cease processing, discard the file data, and retain a reference that the whole batch of anchored operations and all its files are to be ignored. +
        +
      • While this rule is articulated in the Core Index File section of the specification, it should be emphasized to ensure accurate processing: a Core Index File MUST NOT include multiple operations in the operations section of the Core Index File for the same DID Suffix - if any duplicates are found, cease processing, discard the file data, and retain a reference that the whole batch of anchored operations and all its files are to be ignored.
      • +
      +
    6. +
    7. If processing of rules 1 and 2 above resulted in successful validation of the Core Index File, initiate retrieval of the Provisional Index File via the CAS_PROTOCOL using the provisionalIndexFileUri property’s CAS URI value, if the provisionalIndexFileUri property is present. This is only a SUGGESTED point at which to begin retrieval of the Provisional Index File, not a blocking procedural step, so you may continue with processing before retrieval of the Provisional Index File is complete.
    8. +
    9. Iterate the Core Index File Create Entries, and for each entry, process as follows: +
        +
      1. Derive the DID Suffix from the values present in the entry.
      2. +
      3. Ensure the DID Suffix of the operation entry has not been included in another valid operation that was previously processed in the scope of this Core Index File.
      4. +
      5. Create an entry for the operation within the Operation Storage area relative to the DID Suffix.
      6. +
      +
    10. +
    11. Iterate the Core Index File Recovery Entries, and for each entry, process as follows: +
        +
      1. Ensure the DID Suffix of the operation entry has not been included in another valid operation that was previously processed in the scope of this Core Index File.
      2. +
      3. Create an entry for the operation within the Operation Storage area relative to the DID Suffix.
      4. +
      +
    12. +
    13. Iterate the Core Index File Deactivate Entries, and for each entry, process as follows: +
        +
      1. Ensure the DID Suffix of the operation entry has not been included in another valid operation that was previously processed in the scope of this Core Index File.
      2. +
      3. Create an entry for the operation within the Operation Storage area relative to the DID Suffix.
      4. +
      +
    14. +
    +

    § Provisional Index File Processing

    +

    This sequence of rules and processing steps must be followed to correctly process a Provisional Index File:

    +
      +
    1. The Provisional Index File MUST NOT exceed the MAX_PROVISIONAL_INDEX_FILE_SIZE - if it does, cease processing, discard the file data, and retain a reference that the file is to be ignored.
    2. +
    3. Decompress the Provisional Index File in accordance with the implementation’s COMPRESSION_ALGORITHM, within the memory allocation limit specified for decompression in accordance with the implementation-defined MAX_MEMORY_DECOMPRESSION_FACTOR.
    4. +
    5. The Provisional Index File MUST validate against the protocol-defined Provisional Index File schema and construction rules - if it DOES NOT, cease processing, discard the file data, and retain a reference that all Provisional-type files and their operations are to be ignored.
    6. +
    7. If processing of rules 1 and 2 above resulted in successful validation of the Provisional Index File, begin retrieval of the Chunk Files by iterating the chunks array and using the CAS_PROTOCOL to fetch each entry’s chunkFileUri (a CAS URI based on the CAS_URI_ALGORITHM). This is only a SUGGESTED point at which to begin retrieval of the Chunk Files, not a blocking procedural step, so you may continue with processing before retrieval of the Chunk Files is complete.
    8. +
    9. Iterate the Provisional Index File Update Entries, and for each entry, process as follows: +
        +
      1. Ensure the DID Suffix of the operation entry has not been included in another valid operation that was previously processed in the scope of the Provisional Index File or its parent Core Index File.
      2. +
      3. Create an entry for the operation within the Operation Storage area relative to the DID Suffix.
      4. +
      +
    10. +
    11. If the node is in a Light Node configuration, retain a reference to the Chunk Files relative to the DIDs in the anchored batch for just-in-time fetch of the Chunk Files during DID resolution.
    12. +
    +

    § Core Proof File Processing

    +

    This sequence of rules and processing steps must be followed to correctly process a Core Proof File:

    +
      +
    1. The Core Proof File MUST NOT exceed the MAX_PROOF_FILE_SIZE - if it does, cease processing, discard the file data, and retain a reference that the whole batch of anchored operations and all its files are to be ignored.
    2. +
    3. Decompress the Core Proof File in accordance with the implementation’s COMPRESSION_ALGORITHM, within the memory allocation limit specified for decompression in accordance with the implementation-defined MAX_MEMORY_DECOMPRESSION_FACTOR.
    4. +
    5. The Core Proof File MUST validate against the protocol-defined Core Proof File schema and construction rules - if it DOES NOT, cease processing, discard the file data, and retain a reference that the whole batch of anchored operations and all its files are to be ignored.
    6. +
    7. Iterate any Core Proof File Recovery Entries and Core Proof File Deactivate Entries that may be present, and for each entry, process as follows: +
        +
      1. Ensure an operation for the related DID has not been included in another valid operation that was previously processed in the scope of the Core Proof File or its parent Core Index File.
      2. +
      3. Create an entry, or associate with an existing entry, the proof payload within the Operation Storage area relative to the DID Suffix.
      4. +
      +
    8. +
    +

    § Provisional Proof File Processing

    +

    This sequence of rules and processing steps must be followed to correctly process a Provisional Proof File:

    +
      +
    1. The Provisional Proof File MUST NOT exceed the MAX_PROOF_FILE_SIZE - if it does, cease processing, discard the file data, and retain a reference that all Provisional-type files and their operations are to be ignored.
    2. +
    3. Decompress the Provisional Proof File in accordance with the implementation’s COMPRESSION_ALGORITHM, within the memory allocation limit specified for decompression in accordance with the implementation-defined MAX_MEMORY_DECOMPRESSION_FACTOR.
    4. +
    5. The Provisional Proof File MUST validate against the protocol-defined Provisional Proof File schema and construction rules - if it DOES NOT, cease processing, discard the file data, and retain a reference that all Provisional-type files and their operations are to be ignored.
    6. +
    7. Iterate any Provisional Proof File Update Entries that may be present, and for each entry, process as follows: +
        +
      1. Ensure an operation for the related DID has not been included in another valid operation that was previously processed in the scope of the Provisional Proof File or its parent Core Index File. If another previous, valid operation was already processed in the scope of the Provisional Proof File or Core Index File for the same DID, do not process the operation and move to the next operation in the array.
      2. +
      3. Create an entry, or associate with an existing entry, the proof payload within the Operation Storage area relative to the DID Suffix.
      4. +
      +
    8. +
    +

    § Chunk File Processing

    +

    This sequence of rules and processing steps must be followed to correctly process a Chunk File chunk:

    +
      +
    1. The Chunk File chunk MUST NOT exceed the MAX_CHUNK_FILE_SIZE - if it does, cease processing, discard the file data, and retain a reference that the file is to be ignored.
    2. +
    3. Decompress the Chunk File in accordance with the implementation’s COMPRESSION_ALGORITHM, within the memory allocation limit specified for decompression in accordance with the implementation-defined MAX_MEMORY_DECOMPRESSION_FACTOR.
    4. +
    5. The Chunk File MUST validate against the protocol-defined Chunk File schema and construction rules - if it DOES NOT, cease processing, discard the file data, and retain a reference that the file is to be ignored.
    6. +
    7. The canonicalized buffer of each Chunk File delta entry must not exceed the MAX_DELTA_SIZE. If any delta entries exceed the maximum size, cease processing, discard the file data, and retain a reference that the file is to be ignored.
    8. +
    9. In order to process Chunk File Delta Entries in relation to the DIDs they are bound to, they must be mapped back to the Create, Recovery, and Update operation entries present in the Core Index File and Provisional Index File. To create this mapping, concatenate the Core Index File Create Entries, Core Index File Recovery Entries, Provisional Index File Update Entries into a single array, in that order, herein referred to as the Operation Delta Mapping Array. Pseudo-code example:
      let mappingArray = [].concat(CREATE_ENTRIES, RECOVERY_ENTRIES, UPDATE_ENTRIES);
      +
      +
    10. +
    11. With the Operation Delta Mapping Array assembled, iterate the Chunk File Delta Entries from 0 index forward, processing each Chunk File Delta Entry as follows: +
        +
      1. Identify the operation entry from the Operation Delta Mapping Array at the same index as the current iteration and determine its DID Suffix (for Core Index File Create Entries, you will need to compute the DID Suffix). This is the DID the current iteration element maps to.
      2. +
      3. Store the current Chunk File Delta Entry relative to its operation entry in the persistent storage area designated for the related DID Suffix.
      4. +
      +
    12. +
    +
    NOTE

    The assembly and processing of Chunk Files will change in a future update to the protocol to accommodate the introduction of multiple chunk files. The current protocol version is designed around one Chunk File, but the scaffolding is present to move to multiple Chunk Files as development progresses.

    +
    +

    § Proof of Fee

    +
    NOTE

    This section is non-normative

    +
    +

    Sidetree implementers MAY choose to implement protective mechanisms designed to strengthen a Sidetree network against low-cost spurious operations. These mechanisms are primarily designed for open, permissionless implementations utilizing public blockchains that feature native crypto-economic systems.

    +

    § Base Fee Variable

    +

    All of the mechanisms described in this section are based on the same underlying numeric value, known as the Base Fee Variable, that is calculated by processing a collection of native variables from the target anchoring system with a set of deterministic functions. The Base Fee Variable is used in two primary ways:

    +
      +
    1. To set a minimum required native transaction fee that must be paid relative to the number of DID operations a writer seeks to anchor with the transaction
    2. +
    3. To establish a fee basis for any additional economic protections, such as a value locking mechanism wherein a writer must escrow or burn some amount of digital asset to have other nodes view their writes into the network as valid.
    4. +
    +

    To calculate the Base Fee Variable, every implementation will define a deterministic algorithm, which may be static or change dynamically via some form of logical calculation that is applied by all nodes in the system at some interval.

    +

    § Per-Operation Fee

    +

    An implementation may choose to require a per-operation fee, to ensure that the baseline fee paid by a writer on the anchoring system is not able to game unusually low-fee periods to flood the anchoring system with Sidetree-embedded transactions. The following logical process SHOULD be used to set and evaluate a per-operation fee for each Sidetree-bearing transaction that is observed:

    +
      +
    1. Determine the Base Fee Variable for the current block or transaction interval being assessed.
    2. +
    3. Multiply the Base Fee Variable by the Operation Count integer from the Anchor String, producing the total batch operation fee.
    4. +
    5. Validate that the transaction anchored in the anchoring system has spent at least the sum of the total batch operation fee, as derived above.
    6. +
    7. If the transaction spent the required fee (or some amount greater), proceed with processing the anchored batch of DID operations. If the transaction failed to spend the required fee (or some amount greater), ignore the transaction as invalid.
    8. +
    +

    § Value Locking

    +

    An implementation may choose to institute a value locking scheme wherein digital assets native to the underlying anchoring system are locked under some conditions set by the implementation that afford a locking entity access to greater write operation volumes and related capabilities. The basic principle of value locking is to require a form of escrow to gate consumption of resources in the network. In simple terms, with value locking in place, an implementation can require a writer who wants to write batches at the maximum size to first lock an amount of the native underlying anchoring system asset commensurate with the batch sizes they want to anchor. Implementations can create value locking mechanisms a number of ways, but the following is a general example of a value locking approach:

    +
      +
    1. Using the Base Fee Variable, assess a required locking amount that follows an implementation-defined cost curve that maps to the size of batches up to the maximum batch size. (If your implementation features recurring evaluation logic, this will be reevaluated for whatever block or transaction interval you define)
    2. +
    3. Using the underlying anchoring system’s asset locking capabilities (e.g. a Bitcoin Timelock script), validate that all transactions observed within the current block or transaction interval are linked to a sum of locked value that meets or exceeds the required value locking amount. Each locked sum may only be linked to one batch per block or transaction interval, which means anchoring multiple batches that require locks requires multiple locks, compounding the sum that must be locked by a multi-batch writer. A link from a batch-embedded transaction to a lock is typically determined by proving control of a lock via some form of deterministic proof that ties the lock to the batch-embedded transaction (e.g. signing the batch-embedded transactions with keys that control the lock)
    4. +
    5. If a transaction is linked to a locked sum that has been unused by any other transactions from that lock controller during the block, proceed with ingesting the anchored batch and processing it per the directives in the file and transaction processing section of this specification.
    6. +
    +

    § Resolution

    +

    § Operation Compilation

    +
      +
    1. +

      Upon invocation of resolution, retrieve all observed operations for the DID Unique Suffix of the DID URI being resolved.

      +
    2. +
    3. +

      If record of the DID being published has been observed, proceed to Step 3. If there is no observed record of the DID being published, skip all remaining Operation Compilation steps and process the DID as follows:

      +
        +
      1. If the DID URI is a Short-Form DID URI, abort resolution and return Not Found.
      2. +
      3. If the DID URI is a Long-Form DID URI, process as follows: +
          +
        1. Isolate the last colon-separated (:) segment of the DID URI.
        2. +
        3. Using the implementation’s DATA_ENCODING_SCHEME, decode the value. If the values fail to properly decode in accordance with the implementation’s DATA_ENCODING_SCHEME, abort resolution and return Unresolvable.
        4. +
        5. JSON parse the resulting value, apply the canonicalization algorithm, reencode the resulting value and ensure it is the same as the initial value from Step 1. If the values do not match, abort resolution and return Unresolvable.
        6. +
        7. Use the Hashing Process to generate a hash of the canonicalized Create Operation Suffix Data Object and ensure it matches the DID Unique Suffix, if the values do not match, abort resolution and return Unresolvable.
        8. +
        9. Validate the resulting object in accordance with the Create Operation Suffix Data Object schema. If the value is found to be a valid Create Operation Suffix Data Object, proceed. If the value fails validation, abort resolution and return Unresolvable.
        10. +
        11. Validate the Create Operation Delta Object (which is present in a Chunk File Delta Entry for published, anchored DIDs). If the value is found to be a valid Create Operation Delta Object, proceed. If the value fails validation, abort resolution and return Unresolvable.
        12. +
        13. If all steps above are successful, flag the DID as Unpublished and continue to Create operation processing as if the values decoded and validated in the steps above represent the only operation associated with the DID.
        14. +
        +
      4. +
      +
    4. +
    5. +

      Constructing the Operation Hash Map: generate a Create Operation Pool, which will house references to any Create operations processed in the steps below, and begin iterating through the operations present in the DID’s Operation Storage area as follows:

      +
        +
      1. +

        Type-specific operation evaluation:

        + +
      2. +
      3. +

        Ensure a key exists in the Operation Hash Map corresponding to the Map Hash, and that the corresponding value is an array. If no property exists for the Map Hash, create one and let its value be an array.

        +
      4. +
      5. +

        Insert the entry into the array of the Map Hash at its proper position in ascending Anchor Time order.

        +
      6. +
      +
    6. +
    7. +

      Create operation processing: If no operations are present in the Create Operation Pool, cease resolution of the DID and return Unresolvable. If the Create Operation Pool contains operation entries, process them as follows:

      +
        +
      1. Store the value of the recoveryCommitment property from the entry’s Create Operation Suffix Data Object as the Next Recovery Commitment for use in processing the next Recovery operation.
      2. +
      3. Retrieve the Chunk File Delta Entry corresponding to the operation and proceed to Step 3. If the Chunk File Delta Entry is not present because the associated Chunk File has not yet been retrieved and processed (i.e. node is a Light Node implementation, file was previously unavailable, etc.), perform the following steps: +
          +
        1. Using the CAS_PROTOCOL, fetch the Chunk File using the associated Chunk File URI. If the file cannot be retrieved, proceed to recovery and deactivate operation processing.
        2. +
        3. Validate the Chunk File using the Chunk File Processing procedure. If the Chunk File is valid, proceed. If the file is invalid, proceed to recovery and deactivate operation processing.
        4. +
        +
      4. +
      5. Validate the Chunk File Delta Entry. If the Chunk File Delta Entry is invalid, proceed to Recovery and deactivate operation processing.
      6. +
      7. Generate a hash of the canonicalized Chunk File Delta Entry via the HASH_PROTOCOL and ensure the hash matches the value of the Create Operation Suffix Data Object deltaHash property. If the values are exactly equal, proceed, if they are not, proceed to recovery and deactivate operation processing.
      8. +
      9. Store the updateCommitment value of the Chunk File Delta Entry as the Next Update Commitment for use in processing the next Update operation.
      10. +
      11. Begin iterating the patches array in the Chunk File Delta Entry, and for each DID State Patch entry, perform the following steps: +
          +
        1. Validate the entry in accordance any requirements imposed by the Patch Action type indicated by the action value of the entry. If the entry is valid, proceed, if the entry fails validation, reverse all modifications to the DID’s state and proceed to recovery and deactivate operation processing.
        2. +
        3. Apply the patch as directed by the Patch Action type specified by the action property. If any part of the patch fails or produces an error, reverse all modifications to the DID’s state and proceed to recovery and deactivate operation processing.
        4. +
        +
      12. +
      +
    8. +
    9. +

      Recovery and deactivate operation processing: when Create operations have been processed, process any Recovery and Deactivate operations that may exist in the Operation Hash Map via the iteration procedure below. If no Recovery and Deactivate operations are present, proceed to update operation processing.

      +
        +
      1. If a property is present in the Operation Hash Map that matches the Next Recovery Commitment exactly, process its array of operation entries using the following steps. If no property exists in the Operation Hash Map that matches the Next Recovery Commitment exactly, exit recovery and deactivate operation processing and advance to update operation processing.
      2. +
      3. Iterate the array of operation entries forward from 0-index using the process enumerated below until all valid entries are found and processed: + +
      4. +
      5. Once all Recovery and Deactivate operations have been processed, if the Next Update Commitment value is present, proceed to update operation processing. If the Next Update Commitment value is not present or the DID is in a Deactivated state, proceed to compiled state processing.
      6. +
      +
    10. +
    11. +

      Update operation processing: if the DID is marked as Deactivated or the Next Update Commitment value is not present, skip Update processing and proceed to compiled state processing. If the Next Update Commitment value is present and no Deactivate operations were successfully processed during recovery and deactivate operation processing, process any Update operations that may exist in the Operation Hash Map using the following processing loop:

      +
        +
      1. +

        If a property is present in the Operation Hash Map that matches the Next Update Commitment exactly, process its array of operation entries using the following steps. If no property exists in the Operation Hash Map that matches the Next Update Commitment exactly, exit update operation processing and advance to compiled state processing.

        +
      2. +
      3. +

        Iterate the array of operation entries forward from 0-index using the process enumerated below until all valid entries are found and processed:

        +
          +
        1. Retrieve the operation’s Provisional Proof File Update Entry and Chunk File Delta Entry from the pre-processed Provisional Proof File and Chunk File associated with the operation and proceed to validation of the entries, or, if the Provisional Proof File and Chunk File have yet to be retrieved and processed (e.g. the resolving node is in a Light Node configuration), perform the following steps: +
            +
          1. Using the CAS_PROTOCOL, fetch the Provisional Proof File and Chunk File using the associated Provisional Proof File URI and Chunk File URI.
          2. +
          3. If the Provisional Proof File is unable to be retrieved, skip the entry and advance to the next operation.
          4. +
          5. Validate the Provisional Proof File. If the file is valid, proceed, if the file is invalid, skip the entry and advance to the next operation.
          6. +
          +
        2. +
        3. Using the revealed updateKey JWK value, validate the Update Operation Signed Data Object signature. If the signature is valid, proceed, if the signature is invalid, skip the entry and iterate forward to the next entry.
        4. +
        5. Validate the Chunk File and Chunk File Delta Entry. If the Chunk File and Chunk File Delta Entry are valid, proceed, if the entry is invalid, skip the entry and iterate forward to the next entry.
        6. +
        7. Generate a hash of the canonicalized Chunk File Delta Entry via the HASH_PROTOCOL and ensure the hash equals the value of the Update Operation Signed Data Object deltaHash property. If the values are exactly equal, proceed, if they are not, skip the entry and iterate forward to the next entry.
        8. +
        9. Store the updateCommitment value of the Chunk File Delta Entry as the Next Update Commitment for use in processing the next Update operation.
        10. +
        11. Begin iterating the patches array in the Chunk File Delta Entry, and for each DID State Patch entry, perform the following steps: +
            +
          1. Apply the patch as directed by the Patch Action type specified by the action property. If any of the patches produce an error, reverse all of this operation’s patch modifications to the DID state data, while retaining the successful rotation to the next Next Update Commitment value, and iterate forward to the next operation.
          2. +
          +
        12. +
        +
      4. +
      +
    12. +
    13. +

      Compiled state processing: After the DID’s operations have been evaluated in the compilation steps above, the implementation MUST use the DID’s compiled state to generate a valid DID Document in accordance with the W3C Decentralized Identifiers specification. If your implementation is designed to produce a different format of state data, ensure it outputs in accordance with the format you are targeting.

      +
    14. +
    15. +

      If the implementation is outputting DID state data as a DID Document, and the DID Document is being rendered in the JSON-LD representation variant, the implementer SHOULD add an @base entry to the document’s @context, and set the @base value to the id of the resolved DID. This ensures relative path values in the output DID Document are correctly projected into id-related strings by JSON-LD parsers.

      +
    16. +
    17. +

      Once a valid DID state output has been generated (e.g. a valid DID Document), proceed to the DID Resolver Output process if you intend to render the output as a DID Document, in accordance with the Decentralized Identifier Resolution specification.

      +
    18. +
    +

    § DID Resolver Output

    +

    The following describes how to construct Decentralized Identifier Resolution-compliant Resolution Result based on a DID resolved via the Operation Compilation process described in the section above.

    +

    If the DID was determined to be Not Found or Unresolvable, return a response consistent with those states. If the compiled DID was not determined to be Not Found or Unresolvable (per the Operation Compilation process above), proceed as follows:

    +
      +
    1. +

      Generate a JSON object for the Resolution Result, structured in accordance with the Decentralized Identifier Resolution specification.

      +
    2. +
    3. +

      Set the didDocument property of the Resolution Result object to the resolved DID Document generated via the Operation Compilation process.

      +
    4. +
    5. +

      The Resolution Result object MUST include a didDocumentMetadata property, and its value MUST be an object composed of the following properties:

      +
      EXAMPLE
      "didDocumentMetadata": {
      +  "deactivated": true,
      +  "canonicalId": "did:sidetree:EiDyOQbbZAa3aiRzeCkV7LOx3SERjjH93EXoIM3UoN4oWg",
      +  "equivalentId": ["did:sidetree:EiDyOQbbZAa3aiRzeCkV7LOx3SERjjH93EXoIM3UoN4oWg"],
      +  "method": {
      +    "published": true,
      +    "recoveryCommitment": "EiBfOZdMtU6OBw8Pk879QtZ-2J-9FbbjSZyoaA_bqD4zhA",
      +    "updateCommitment": "EiDOrcmPtfMHuwIWN6YoihdeIPxOKDHy3D6sdMXu_7CN0w"
      +  }
      +}
      +
      +
      +
        +
      • deactivated - This property MUST be present if the resolved DID is determined to be in a deactivated state, and it MUST be set to the boolean value true. If the resolved DID is not in a deactivated state, this value MUST be set to the boolean value false.
      • +
      • canonicalId - If canonical representation of the resolved DID exists, the implementation MUST include the canonicalId property, and the presence and value of the canonicalId property is determined as follows: +
          +
        1. Presence and value of the canonicalId property: +
            +
          • If the DID being resolved is a Long-Form DID representation and is unpublished, the canonicalId property MUST NOT be included in the didDocumentMetadata object.
          • +
          • If the DID being resolved is a Long-Form DID representation and is published, the canonicalId property MUST be included in the didDocumentMetadata object, and its value MUST be the Short-Form DID representation.
          • +
          • If the DID being resolved is a Short-Form DID representation and is published, the canonicalId property MUST be included in the didDocumentMetadata object, and its value MUST be the Short-Form DID representation.
          • +
          +
        2. +
        3. Inclusion of the canonical DID representation in the equivalentId array: +
            +
          • If under any of the cases above there is a canonical DID representation included for the canonicalId property, the canonical DID representation MUST also be included in the equivalentId array. See below for details on the equivalentId property.
          • +
          +
        4. +
        +
      • +
      • equivalentId - If equivalent representations of the resolved DID exist, the implementation MUST include the equivalentId property, and the presence and value of the equivalentId property is determined as follows: +
          +
        • If the DID being resolved is a Long-Form DID representation, the equivalentId property MUST be included in the didDocumentMetadata object, and its array value MUST include the Short-Form DID representation.
        • +
        +
      • +
      • method - Its value MUST be an object composed of the following values: +
          +
        1. The object MUST include a published property with a boolean value. If the compiled DID state is flagged as Unpublished and/or Not Found (per the Operation Compilation process), the published property MUST be set to false, otherwise, set the value to true if a valid anchoring entry was located for the DID.
        2. +
        3. The object MUST include an updateCommitment property, and its value MUST be the updateCommitment hash value expected to be fulfilled with the next updateKey revealed in the next Update operation.
        4. +
        5. The object MUST include a recoveryCommitment property, and its value MUST be the recoveryCommitment hash value expected to be fulfilled with the next recoveryKey revealed in the next Recovery operation.
        6. +
        +
      • +
      +
    6. +
    +

    § Unresolvable DIDs

    +

    If a DID is found to be unresolvable, per the logic defined under the Operation Compilation section, a Sidetree-compliant node SHOULD return the appropriate error code over the transport of the resolution request. For HTTP, you MUST return the responses and status codes defined by the Sidetree API specification section on Resolution.

    +

    § Late Publishing

    +

    Sidetree is an eventually strongly consistent, conflict-free state resolution system based on cryptographically signed, delta-based DID operations, which derives its deterministic order of operations from the position of operation entries in a decentralized anchoring system. Unlike the native tokens of a strongly immutable anchoring system (e.g. Bitcoin), DIDs represent unique identifiers that are generally intended to be non-transferable. As such, the Sidetree protocol provides no technical mechanism for exchanging ownership of DIDs with ‘double-spend’ assurance, the way one might do with a fungible cryptocurrency token.

    +

    For Sidetree, non-transferability manifests in a distinct way: a DID owner is ultimately in control of their past, present, and future state changes, and can expose state change operations as they choose across the lineage of their DID’s operational history. DID owners can create forks within their own DID state history, and nothing forces them to expose DID state operations they anchor. A DID operation anchored in the past, at Time X, can be exposed at some time in the future, at Time Y. This means Sidetree nodes could become aware of past operations that create a change in the lineage of a DID - this is known as Late Publishing of a DID operation. However, due to the non-transferability of DIDs, this condition is isolated to each DID’s own state lineage, and resolved by Sidetree’s deterministic ruleset, which guarantees only one fork of a DID’s state history can ever be valid. To better understand this, consider the following diagram that illustrates a DID owner, Alice, creating forks by creating and anchoring operations in the past that she does not expose to the network:

    +
    +graph TB + 0 --> 1 + 1 --> 2a + 1 --> 2b + 2b --> 3 +
    +

    As you can see above, Alice has created a fork by anchoring the divergent operations 2a and 2b. Let us assume Alice refrains from publishing the CAS files that other Sidetree nodes would detect to locate and replicate the data for operation 2a, and further, assume Alice continues creating more operation history stemming from operation 2b. Whenever Alice exposes the DID operation data for 2a, other Sidetree nodes will need to decide which operation between 2a and 2b is the ‘right’ operation. The Sidetree protocol includes a strict rule that resolves this conflict, and any variation of it: the earliest operation in Anchor Time always wins. If operation 2a precedes operation 2b in Anchor Time, whenever she decides to publish operation 2a, all other Sidetree nodes would process the operation and immediately deem operation 2a to be the valid, correct operational fork. This remains true even if Alice continues building operational history stemming from operation 2b any amount of time into the future.

    +

    With this example of late publishing in mind, the most important aspect to remember is that DID owners decide what the PKI state of their DIDs should be, and remain in control of that state independent of the shape of their DID operational history. The net takeaway is that regardless of how a DID owner decides to update the state of their DID, the decision over what that state is remains entirely their choice.

    +

    § Method Versioning

    +

    It is RECOMMENDED that Sidetree based DID Methods implement the following versioning structures to support development, testing, staging and production network deployments.

    +

    We define a network suffix as follows for a given DID Method:

    +

    did:<method>:<network>:<didUniqueSuffix>

    +

    If no network suffix is provided, it is assumed that the “mainnet” or “production” network is to be used… for example, these DIDs should resolve to the same DID state:

    +
    did:elem:mainnet:EiD0x0JeWXQbVIpBpyeyF5FDdZN1U7enAfHnd13Qk_CYpQ
    +did:elem:EiD0x0JeWXQbVIpBpyeyF5FDdZN1U7enAfHnd13Qk_CYpQ
    +
    +

    An ION DID on the Bitcoin Testnet3 testnet is defined as follows:

    +

    did:ion:testnet3:EiD0x0JeWXQbVIpBpyeyF5FDdZN1U7enAfHnd13Qk_CYpQ

    +

    An ELEM DID on the Ethereum Ropsten testnet is defined as follows:

    +

    did:elem:ropsten:EiD0x0JeWXQbVIpBpyeyF5FDdZN1U7enAfHnd13Qk_CYpQ

    +
    WARNING

    Implementers should be aware that if the underlying decentralized anchoring system were to fork, the identifiers will also be forked. In this case, a new identifier must be created either through an indication at the network layer or with a completely new method name to identify the decentralized identifiers of the forked network.

    +
    +

    § Context

    +

    Per the DID Core Spec an @context MAY be used to represent a DID Document as Linked Data.

    +

    If an @context is present, any properties not defined in DID Core, MUST be defined in this context, or in a DID Method specific one.

    +

    For example:

    +
    {
    +    "@context": [
    +        "https://www.w3.org/ns/did/v1", 
    +        "https://identity.foundation/sidetree/contexts/v1",
    +        "https://example.com/method/specific.jsonld"
    +    ]
    +}
    +
    +

    § recovery

    +

    A verificationMethod used to support DID Document Recover Operation verification.

    +

    For Example:

    +
    {
    +    "@context": [
    +        "https://www.w3.org/ns/did/v1", 
    +        "https://identity.foundation/sidetree/contexts/v1"
    +    ],
    +    "recovery": [{
    +      "id": "did:example:123#JUvpllMEYUZ2joO59UNui_XYDqxVqiFLLAJ8klWuPBw",
    +      "type": "EcdsaSecp256k1VerificationKey2019",
    +      "publicKeyJwk": {
    +        "crv": "secp256k1",
    +        "kid": "JUvpllMEYUZ2joO59UNui_XYDqxVqiFLLAJ8klWuPBw",
    +        "kty": "EC",
    +        "x": "dWCvM4fTdeM0KmloF57zxtBPXTOythHPMm1HCLrdd3A",
    +        "y": "36uMVGM7hnw-N6GnjFcihWE3SkrhMLzzLCdPMXPEXlA"
    +      }
    +    }]
    +}
    +
    +

    § operation

    +

    A verificationMethod used to support verification of DID Document Operations: Create, Update, Deactivate.

    +

    For Example:

    +
    {
    +    "@context": [
    +        "https://www.w3.org/ns/did/v1", 
    +        "https://identity.foundation/sidetree/contexts/v1"
    +    ],
    +    "operation": [{
    +      "id": "did:example:123#JUvpllMEYUZ2joO59UNui_XYDqxVqiFLLAJ8klWuPBw",
    +      "type": "EcdsaSecp256k1VerificationKey2019",
    +      "publicKeyJwk": {
    +        "crv": "secp256k1",
    +        "kid": "JUvpllMEYUZ2joO59UNui_XYDqxVqiFLLAJ8klWuPBw",
    +        "kty": "EC",
    +        "x": "dWCvM4fTdeM0KmloF57zxtBPXTOythHPMm1HCLrdd3A",
    +        "y": "36uMVGM7hnw-N6GnjFcihWE3SkrhMLzzLCdPMXPEXlA"
    +      }
    +    }]
    +}
    +
    +

    § usage

    +

    Deprecated. DO NOT USE.

    +

    Was introduced to support key ops pre sidetree protocol spec v1.

    +

    § publicKeyJwk

    +

    A public key in JWK format. A JSON Web Key (JWK) is a JavaScript Object Notation (JSON) data structure that represents a cryptographic key. Read RFC7517.

    +

    Example:

    +
    {
    +  "id": "did:example:123#JUvpllMEYUZ2joO59UNui_XYDqxVqiFLLAJ8klWuPBw",
    +  "type": "EcdsaSecp256k1VerificationKey2019",
    +  "publicKeyJwk": {
    +    "crv": "secp256k1",
    +    "kid": "JUvpllMEYUZ2joO59UNui_XYDqxVqiFLLAJ8klWuPBw",
    +    "kty": "EC",
    +    "x": "dWCvM4fTdeM0KmloF57zxtBPXTOythHPMm1HCLrdd3A",
    +    "y": "36uMVGM7hnw-N6GnjFcihWE3SkrhMLzzLCdPMXPEXlA"
    +  }
    +}
    +
    +

    § publicKeyHex

    +

    A hex encoded compressed public key.

    +

    Example:

    +
    {
    +  "id": "did:example:123#JUvpllMEYUZ2joO59UNui_XYDqxVqiFLLAJ8klWuPBw",
    +  "type": "EcdsaSecp256k1VerificationKey2019",
    +  "publicKeyHex": "027560af3387d375e3342a6968179ef3c6d04f5d33b2b611cf326d4708badd7770"
    +}
    +
    +

    § Method & Client Guidelines

    +

    The following are advisements and best practices for DID Method and Client (SDK, wallets, etc.) implementers that interact with Sidetree-based DID Methods. These guidelines should be carefully considered when implementing or interacting with a Sidetree-based DID Method.

    +

    § Sidetree Operations

    +

    A Sidetree client manages keys and performs document operations on behalf of a DID owner. The Sidetree client needs to comply with the following guidelines to securely, successfully manage a user’s DIDs:

    +
      +
    1. +

      The client MUST keep the operation payload once it is submitted to a Sidetree node until it is generally available and observed. If the submitted operation is not anchored and propagated, for whatever reason, the same operation payload MUST be resubmitted. Submitting a different operation payload can put the DID at risk of late publish branching, which can lead to an unrecoverable DID if the original operation payload contains a recovery key rotation and that recovery key is lost. While this is a fringe possible issue, it’s best to just retain these small operation payloads.

      +
    2. +
    3. +

      Another reason to retain operation payloads is to always have them available in the case you want to serve them across the backing Content Addressable Storage network. Most users won’t elect to do this, but advanced wallets and users who seek maximum independence from any reliance on the persistence of their operations in the network may want to.

      +
    4. +
    +

    § Update vs Recovery Keys

    +

    It is advised that clients managing DIDs try as best as possible to separate the concepts of Update and Recovery keys. Compromise or loss of Update keys does not permanently imperil a user’s control over their DID, whereas a loss or compromise of a Recovery key will. As such, it is important to create appropriate protections and processes for securing and using each type of key, commensurate with their level of control and risk.

    +

    § Appendix

    +

    § Test Vectors

    +

    The Resolution test vectors are the result of applying operations and obtaining resolution results.

    +
      +
    • The Create Resolution is generated by applying the Create Operation input vector and then resolving shortFormDid.
    • +
    • The Update Resolution is generated by applying the Create Operation followed by the Update Operation from the input vector and then resolving shortFormDid.
    • +
    • The Recover Resolution is generated by applying the Create Operation followed by the Recover Operation from the input vector and then resolving shortFormDid.
    • +
    • The Deactivate Resolution is generated by applying the Create Operation followed by the Recover Operation followed by the Deactivate Operation from the input vector and then resolving shortFormDid.
    • +
    • The Long-Form Resolution is generated by resolving longFormDid.
    • +
    +

    § DID

    +
    {
    +  "longFormDid": "did:sidetree:EiDyOQbbZAa3aiRzeCkV7LOx3SERjjH93EXoIM3UoN4oWg:eyJkZWx0YSI6eyJwYXRjaGVzIjpbeyJhY3Rpb24iOiJyZXBsYWNlIiwiZG9jdW1lbnQiOnsicHVibGljS2V5cyI6W3siaWQiOiJwdWJsaWNLZXlNb2RlbDFJZCIsInB1YmxpY0tleUp3ayI6eyJjcnYiOiJzZWNwMjU2azEiLCJrdHkiOiJFQyIsIngiOiJ0WFNLQl9ydWJYUzdzQ2pYcXVwVkpFelRjVzNNc2ptRXZxMVlwWG45NlpnIiwieSI6ImRPaWNYcWJqRnhvR0otSzAtR0oxa0hZSnFpY19EX09NdVV3a1E3T2w2bmsifSwicHVycG9zZXMiOlsiYXV0aGVudGljYXRpb24iLCJrZXlBZ3JlZW1lbnQiXSwidHlwZSI6IkVjZHNhU2VjcDI1NmsxVmVyaWZpY2F0aW9uS2V5MjAxOSJ9XSwic2VydmljZXMiOlt7ImlkIjoic2VydmljZTFJZCIsInNlcnZpY2VFbmRwb2ludCI6Imh0dHA6Ly93d3cuc2VydmljZTEuY29tIiwidHlwZSI6InNlcnZpY2UxVHlwZSJ9XX19XSwidXBkYXRlQ29tbWl0bWVudCI6IkVpREtJa3dxTzY5SVBHM3BPbEhrZGI4Nm5ZdDBhTnhTSFp1MnItYmhFem5qZEEifSwic3VmZml4RGF0YSI6eyJkZWx0YUhhc2giOiJFaUNmRFdSbllsY0Q5RUdBM2RfNVoxQUh1LWlZcU1iSjluZmlxZHo1UzhWRGJnIiwicmVjb3ZlcnlDb21taXRtZW50IjoiRWlCZk9aZE10VTZPQnc4UGs4NzlRdFotMkotOUZiYmpTWnlvYUFfYnFENHpoQSJ9fQ",
    +  "shortFormDid": "did:sidetree:EiDyOQbbZAa3aiRzeCkV7LOx3SERjjH93EXoIM3UoN4oWg"
    +}
    +
    +
    +

    § Operation Inputs

    +

    The following operation inputs are in the form of Sidetree REST API Operations.

    +
    § Create
    +
    {
    +  "type": "create",
    +  "suffixData": {
    +    "deltaHash": "EiCfDWRnYlcD9EGA3d_5Z1AHu-iYqMbJ9nfiqdz5S8VDbg",
    +    "recoveryCommitment": "EiBfOZdMtU6OBw8Pk879QtZ-2J-9FbbjSZyoaA_bqD4zhA"
    +  },
    +  "delta": {
    +    "updateCommitment": "EiDKIkwqO69IPG3pOlHkdb86nYt0aNxSHZu2r-bhEznjdA",
    +    "patches": [
    +      {
    +        "action": "replace",
    +        "document": {
    +          "publicKeys": [
    +            {
    +              "id": "publicKeyModel1Id",
    +              "type": "EcdsaSecp256k1VerificationKey2019",
    +              "publicKeyJwk": {
    +                "kty": "EC",
    +                "crv": "secp256k1",
    +                "x": "tXSKB_rubXS7sCjXqupVJEzTcW3MsjmEvq1YpXn96Zg",
    +                "y": "dOicXqbjFxoGJ-K0-GJ1kHYJqic_D_OMuUwkQ7Ol6nk"
    +              },
    +              "purposes": [
    +                "authentication",
    +                "keyAgreement"
    +              ]
    +            }
    +          ],
    +          "services": [
    +            {
    +              "id": "service1Id",
    +              "type": "service1Type",
    +              "serviceEndpoint": "http://www.service1.com"
    +            }
    +          ]
    +        }
    +      }
    +    ]
    +  }
    +}
    +
    +
    +
    § Update
    +
    {
    +  "type": "update",
    +  "didSuffix": "EiDyOQbbZAa3aiRzeCkV7LOx3SERjjH93EXoIM3UoN4oWg",
    +  "revealValue": "EiBkRSeixqX-PhOij6PIpuGfPld5Nif5MxcrgtGCw-t6LA",
    +  "delta": {
    +    "patches": [
    +      {
    +        "action": "add-public-keys",
    +        "publicKeys": [
    +          {
    +            "id": "additional-key",
    +            "type": "EcdsaSecp256k1VerificationKey2019",
    +            "publicKeyJwk": {
    +              "kty": "EC",
    +              "crv": "secp256k1",
    +              "x": "aN75CTjy3VCgGAJDNJHbcb55hO8CobEKzgCNrUeOwAY",
    +              "y": "K9FhCEpa_jG09pB6qriXrgSvKzXm6xtxBvZzIoXXWm4"
    +            },
    +            "purposes": [
    +              "authentication",
    +              "assertionMethod",
    +              "capabilityInvocation",
    +              "capabilityDelegation",
    +              "keyAgreement"
    +            ]
    +          }
    +        ]
    +      }
    +    ],
    +    "updateCommitment": "EiDOrcmPtfMHuwIWN6YoihdeIPxOKDHy3D6sdMXu_7CN0w"
    +  },
    +  "signedData": "eyJhbGciOiJFUzI1NksifQ.eyJ1cGRhdGVLZXkiOnsia3R5IjoiRUMiLCJjcnYiOiJzZWNwMjU2azEiLCJ4Ijoid2Z3UUNKM09ScVZkbkhYa1Q4UC1MZ19HdHhCRWhYM3R5OU5VbnduSHJtdyIsInkiOiJ1aWU4cUxfVnVBblJEZHVwaFp1eExPNnFUOWtQcDNLUkdFSVJsVHBXcmZVIn0sImRlbHRhSGFzaCI6IkVpQ3BqTjQ3ZjBNcTZ4RE5VS240aFNlZ01FcW9EU19ycFEyOVd5MVY3M1ZEYncifQ.RwZK1DG5zcr4EsrRImzStb0VX5j2ZqApXZnuoAkA3IoRdErUscNG8RuxNZ0FjlJtjMJ0a-kn-_MdtR0wwvWVgg"
    +}
    +
    +
    +
    § Recover
    +
    {
    +  "type": "recover",
    +  "didSuffix": "EiDyOQbbZAa3aiRzeCkV7LOx3SERjjH93EXoIM3UoN4oWg",
    +  "revealValue": "EiAJ-97Is59is6FKAProwDo870nmwCeP8n5nRRFwPpUZVQ",
    +  "signedData": "eyJhbGciOiJFUzI1NksifQ.eyJkZWx0YUhhc2giOiJFaUNTem1ZSk0yWGpaWE00a1Q0bGpKcEVGTjVmVkM1QVNWZ3hSekVtMEF2OWp3IiwicmVjb3ZlcnlLZXkiOnsia3R5IjoiRUMiLCJjcnYiOiJzZWNwMjU2azEiLCJ4IjoibklxbFJDeDBleUJTWGNRbnFEcFJlU3Y0enVXaHdDUldzc29jOUxfbmo2QSIsInkiOiJpRzI5Vks2bDJVNXNLQlpVU0plUHZ5RnVzWGdTbEsyZERGbFdhQ004RjdrIn0sInJlY292ZXJ5Q29tbWl0bWVudCI6IkVpQ3NBN1NHTE5lZGE1SW5sb3Fub2tVY0pGejZ2S1Q0SFM1ZGNLcm1ubEpocEEifQ.lxWnrg5jaeCAhYuz1fPhidKw6Z2cScNlEc6SWcs15DtJbrHZFxl5IezGJ3cWdOSS2DlzDl4M1ZF8dDE9kRwFeQ",
    +  "delta": {
    +    "patches": [
    +      {
    +        "action": "replace",
    +        "document": {
    +          "publicKeys": [
    +            {
    +              "id": "newKey",
    +              "type": "EcdsaSecp256k1VerificationKey2019",
    +              "publicKeyJwk": {
    +                "kty": "EC",
    +                "crv": "secp256k1",
    +                "x": "JUWp0pAMGevNLhqq_Qmd48izuLYfO5XWpjSmy5btkjc",
    +                "y": "QYaSu1NHYnxR4qfk-RkXb4NQnQf1X3XQCpDYuibvlNc"
    +              },
    +              "purposes": [
    +                "authentication",
    +                "assertionMethod",
    +                "capabilityInvocation",
    +                "capabilityDelegation",
    +                "keyAgreement"
    +              ]
    +            }
    +          ],
    +          "services": [
    +            {
    +              "id": "serviceId123",
    +              "type": "someType",
    +              "serviceEndpoint": "https://www.url.com"
    +            }
    +          ]
    +        }
    +      }
    +    ],
    +    "updateCommitment": "EiD6_csybTfxELBoMgkE9O2BTCmhScG_RW_qaZQkIkJ_aQ"
    +  }
    +}
    +
    +
    +
    § Deactivate
    +
    {
    +  "type": "deactivate",
    +  "didSuffix": "EiDyOQbbZAa3aiRzeCkV7LOx3SERjjH93EXoIM3UoN4oWg",
    +  "revealValue": "EiB-dib5oumdaDGH47TB17Qg1nHza036bTIGibQOKFUY2A",
    +  "signedData": "eyJhbGciOiJFUzI1NksifQ.eyJkaWRTdWZmaXgiOiJFaUR5T1FiYlpBYTNhaVJ6ZUNrVjdMT3gzU0VSampIOTNFWG9JTTNVb040b1dnIiwicmVjb3ZlcnlLZXkiOnsia3R5IjoiRUMiLCJjcnYiOiJzZWNwMjU2azEiLCJ4IjoiSk1ucF9KOW5BSGFkTGpJNmJfNVU3M1VwSEZqSEZTVHdtc1ZUUG9FTTVsMCIsInkiOiJ3c1QxLXN0UWJvSldPeEJyUnVINHQwVV9zX1lSQy14WXQyRkFEVUNHR2M4In19.ARTZrvupKdShOFNAJ4EWnsuaONKBgXUiwY5Ct10a9IXIp1uFsg0UyDnZGZtJT2v2bgtmYsQBmT6L9kKaaDcvUQ"
    +}
    +
    +
    +

    § Resolution

    +
    § Create
    +
    {
    +  "@context": "https://w3id.org/did-resolution/v1",
    +  "didDocument": {
    +    "id": "did:sidetree:EiDyOQbbZAa3aiRzeCkV7LOx3SERjjH93EXoIM3UoN4oWg",
    +    "@context": [
    +      "https://www.w3.org/ns/did/v1",
    +      {
    +        "@base": "did:sidetree:EiDyOQbbZAa3aiRzeCkV7LOx3SERjjH93EXoIM3UoN4oWg"
    +      }
    +    ],
    +    "service": [
    +      {
    +        "id": "#service1Id",
    +        "type": "service1Type",
    +        "serviceEndpoint": "http://www.service1.com"
    +      }
    +    ],
    +    "verificationMethod": [
    +      {
    +        "id": "#publicKeyModel1Id",
    +        "controller": "did:sidetree:EiDyOQbbZAa3aiRzeCkV7LOx3SERjjH93EXoIM3UoN4oWg",
    +        "type": "EcdsaSecp256k1VerificationKey2019",
    +        "publicKeyJwk": {
    +          "kty": "EC",
    +          "crv": "secp256k1",
    +          "x": "tXSKB_rubXS7sCjXqupVJEzTcW3MsjmEvq1YpXn96Zg",
    +          "y": "dOicXqbjFxoGJ-K0-GJ1kHYJqic_D_OMuUwkQ7Ol6nk"
    +        }
    +      }
    +    ],
    +    "authentication": [
    +      "#publicKeyModel1Id"
    +    ],
    +    "keyAgreement": [
    +      "#publicKeyModel1Id"
    +    ]
    +  },
    +  "didDocumentMetadata": {
    +    "canonicalId": "did:sidetree:EiDyOQbbZAa3aiRzeCkV7LOx3SERjjH93EXoIM3UoN4oWg",
    +    "method": {
    +      "published": true,
    +      "recoveryCommitment": "EiBfOZdMtU6OBw8Pk879QtZ-2J-9FbbjSZyoaA_bqD4zhA",
    +      "updateCommitment": "EiDKIkwqO69IPG3pOlHkdb86nYt0aNxSHZu2r-bhEznjdA"
    +    }
    +  }
    +}
    +
    +
    § Update
    +
    {
    +  "@context": "https://w3id.org/did-resolution/v1",
    +  "didDocument": {
    +    "id": "did:sidetree:EiDyOQbbZAa3aiRzeCkV7LOx3SERjjH93EXoIM3UoN4oWg",
    +    "@context": [
    +      "https://www.w3.org/ns/did/v1",
    +      {
    +        "@base": "did:sidetree:EiDyOQbbZAa3aiRzeCkV7LOx3SERjjH93EXoIM3UoN4oWg"
    +      }
    +    ],
    +    "service": [
    +      {
    +        "id": "#service1Id",
    +        "type": "service1Type",
    +        "serviceEndpoint": "http://www.service1.com"
    +      }
    +    ],
    +    "verificationMethod": [
    +      {
    +        "id": "#publicKeyModel1Id",
    +        "controller": "did:sidetree:EiDyOQbbZAa3aiRzeCkV7LOx3SERjjH93EXoIM3UoN4oWg",
    +        "type": "EcdsaSecp256k1VerificationKey2019",
    +        "publicKeyJwk": {
    +          "kty": "EC",
    +          "crv": "secp256k1",
    +          "x": "tXSKB_rubXS7sCjXqupVJEzTcW3MsjmEvq1YpXn96Zg",
    +          "y": "dOicXqbjFxoGJ-K0-GJ1kHYJqic_D_OMuUwkQ7Ol6nk"
    +        }
    +      },
    +      {
    +        "id": "#additional-key",
    +        "controller": "did:sidetree:EiDyOQbbZAa3aiRzeCkV7LOx3SERjjH93EXoIM3UoN4oWg",
    +        "type": "EcdsaSecp256k1VerificationKey2019",
    +        "publicKeyJwk": {
    +          "kty": "EC",
    +          "crv": "secp256k1",
    +          "x": "aN75CTjy3VCgGAJDNJHbcb55hO8CobEKzgCNrUeOwAY",
    +          "y": "K9FhCEpa_jG09pB6qriXrgSvKzXm6xtxBvZzIoXXWm4"
    +        }
    +      }
    +    ],
    +    "authentication": [
    +      "#publicKeyModel1Id",
    +      "#additional-key"
    +    ],
    +    "keyAgreement": [
    +      "#publicKeyModel1Id",
    +      "#additional-key"
    +    ],
    +    "assertionMethod": [
    +      "#additional-key"
    +    ],
    +    "capabilityInvocation": [
    +      "#additional-key"
    +    ],
    +    "capabilityDelegation": [
    +      "#additional-key"
    +    ]
    +  },
    +  "didDocumentMetadata": {
    +    "canonicalId": "did:sidetree:EiDyOQbbZAa3aiRzeCkV7LOx3SERjjH93EXoIM3UoN4oWg",
    +    "method": {
    +      "published": true,
    +      "recoveryCommitment": "EiBfOZdMtU6OBw8Pk879QtZ-2J-9FbbjSZyoaA_bqD4zhA",
    +      "updateCommitment": "EiDOrcmPtfMHuwIWN6YoihdeIPxOKDHy3D6sdMXu_7CN0w"
    +    }
    +  }
    +}
    +
    +
    § Recover
    +
    {
    +  "@context": "https://w3id.org/did-resolution/v1",
    +  "didDocument": {
    +    "id": "did:sidetree:EiDyOQbbZAa3aiRzeCkV7LOx3SERjjH93EXoIM3UoN4oWg",
    +    "@context": [
    +      "https://www.w3.org/ns/did/v1",
    +      {
    +        "@base": "did:sidetree:EiDyOQbbZAa3aiRzeCkV7LOx3SERjjH93EXoIM3UoN4oWg"
    +      }
    +    ],
    +    "service": [
    +      {
    +        "id": "#serviceId123",
    +        "type": "someType",
    +        "serviceEndpoint": "https://www.url.com"
    +      }
    +    ],
    +    "verificationMethod": [
    +      {
    +        "id": "#newKey",
    +        "controller": "did:sidetree:EiDyOQbbZAa3aiRzeCkV7LOx3SERjjH93EXoIM3UoN4oWg",
    +        "type": "EcdsaSecp256k1VerificationKey2019",
    +        "publicKeyJwk": {
    +          "kty": "EC",
    +          "crv": "secp256k1",
    +          "x": "JUWp0pAMGevNLhqq_Qmd48izuLYfO5XWpjSmy5btkjc",
    +          "y": "QYaSu1NHYnxR4qfk-RkXb4NQnQf1X3XQCpDYuibvlNc"
    +        }
    +      }
    +    ],
    +    "authentication": [
    +      "#newKey"
    +    ],
    +    "assertionMethod": [
    +      "#newKey"
    +    ],
    +    "capabilityInvocation": [
    +      "#newKey"
    +    ],
    +    "capabilityDelegation": [
    +      "#newKey"
    +    ],
    +    "keyAgreement": [
    +      "#newKey"
    +    ]
    +  },
    +  "didDocumentMetadata": {
    +    "canonicalId": "did:sidetree:EiDyOQbbZAa3aiRzeCkV7LOx3SERjjH93EXoIM3UoN4oWg",
    +    "method": {
    +      "published": true,
    +      "recoveryCommitment": "EiCsA7SGLNeda5InloqnokUcJFz6vKT4HS5dcKrmnlJhpA",
    +      "updateCommitment": "EiD6_csybTfxELBoMgkE9O2BTCmhScG_RW_qaZQkIkJ_aQ"
    +    }
    +  }
    +}
    +
    +
    § Deactivate
    +
    {
    +  "@context": "https://w3id.org/did-resolution/v1",
    +  "didDocument": { 
    +    "id": "did:sidetree:EiDyOQbbZAa3aiRzeCkV7LOx3SERjjH93EXoIM3UoN4oWg", 
    +    "@context": [ 
    +      "https://www.w3.org/ns/did/v1", 
    +      { "@base": "did:sidetree:EiDyOQbbZAa3aiRzeCkV7LOx3SERjjH93EXoIM3UoN4oWg" }
    +    ] 
    +  },
    +  "didDocumentMetadata": {
    +    "deactivated": true,
    +    "method": {
    +      "published": true
    +    },
    +    "canonicalId": "did:sidetree:EiDyOQbbZAa3aiRzeCkV7LOx3SERjjH93EXoIM3UoN4oWg"
    +  }
    +}
    +
    +
    § Long-Form Response
    +
    {
    +  "@context": "https://w3id.org/did-resolution/v1",
    +  "didDocument": {
    +    "id": "did:sidetree:EiDyOQbbZAa3aiRzeCkV7LOx3SERjjH93EXoIM3UoN4oWg:eyJkZWx0YSI6eyJwYXRjaGVzIjpbeyJhY3Rpb24iOiJyZXBsYWNlIiwiZG9jdW1lbnQiOnsicHVibGljS2V5cyI6W3siaWQiOiJwdWJsaWNLZXlNb2RlbDFJZCIsInB1YmxpY0tleUp3ayI6eyJjcnYiOiJzZWNwMjU2azEiLCJrdHkiOiJFQyIsIngiOiJ0WFNLQl9ydWJYUzdzQ2pYcXVwVkpFelRjVzNNc2ptRXZxMVlwWG45NlpnIiwieSI6ImRPaWNYcWJqRnhvR0otSzAtR0oxa0hZSnFpY19EX09NdVV3a1E3T2w2bmsifSwicHVycG9zZXMiOlsiYXV0aGVudGljYXRpb24iLCJrZXlBZ3JlZW1lbnQiXSwidHlwZSI6IkVjZHNhU2VjcDI1NmsxVmVyaWZpY2F0aW9uS2V5MjAxOSJ9XSwic2VydmljZXMiOlt7ImlkIjoic2VydmljZTFJZCIsInNlcnZpY2VFbmRwb2ludCI6Imh0dHA6Ly93d3cuc2VydmljZTEuY29tIiwidHlwZSI6InNlcnZpY2UxVHlwZSJ9XX19XSwidXBkYXRlQ29tbWl0bWVudCI6IkVpREtJa3dxTzY5SVBHM3BPbEhrZGI4Nm5ZdDBhTnhTSFp1MnItYmhFem5qZEEifSwic3VmZml4RGF0YSI6eyJkZWx0YUhhc2giOiJFaUNmRFdSbllsY0Q5RUdBM2RfNVoxQUh1LWlZcU1iSjluZmlxZHo1UzhWRGJnIiwicmVjb3ZlcnlDb21taXRtZW50IjoiRWlCZk9aZE10VTZPQnc4UGs4NzlRdFotMkotOUZiYmpTWnlvYUFfYnFENHpoQSJ9fQ",
    +    "@context": [
    +      "https://www.w3.org/ns/did/v1",
    +      {
    +        "@base": "did:sidetree:EiDyOQbbZAa3aiRzeCkV7LOx3SERjjH93EXoIM3UoN4oWg:eyJkZWx0YSI6eyJwYXRjaGVzIjpbeyJhY3Rpb24iOiJyZXBsYWNlIiwiZG9jdW1lbnQiOnsicHVibGljS2V5cyI6W3siaWQiOiJwdWJsaWNLZXlNb2RlbDFJZCIsInB1YmxpY0tleUp3ayI6eyJjcnYiOiJzZWNwMjU2azEiLCJrdHkiOiJFQyIsIngiOiJ0WFNLQl9ydWJYUzdzQ2pYcXVwVkpFelRjVzNNc2ptRXZxMVlwWG45NlpnIiwieSI6ImRPaWNYcWJqRnhvR0otSzAtR0oxa0hZSnFpY19EX09NdVV3a1E3T2w2bmsifSwicHVycG9zZXMiOlsiYXV0aGVudGljYXRpb24iLCJrZXlBZ3JlZW1lbnQiXSwidHlwZSI6IkVjZHNhU2VjcDI1NmsxVmVyaWZpY2F0aW9uS2V5MjAxOSJ9XSwic2VydmljZXMiOlt7ImlkIjoic2VydmljZTFJZCIsInNlcnZpY2VFbmRwb2ludCI6Imh0dHA6Ly93d3cuc2VydmljZTEuY29tIiwidHlwZSI6InNlcnZpY2UxVHlwZSJ9XX19XSwidXBkYXRlQ29tbWl0bWVudCI6IkVpREtJa3dxTzY5SVBHM3BPbEhrZGI4Nm5ZdDBhTnhTSFp1MnItYmhFem5qZEEifSwic3VmZml4RGF0YSI6eyJkZWx0YUhhc2giOiJFaUNmRFdSbllsY0Q5RUdBM2RfNVoxQUh1LWlZcU1iSjluZmlxZHo1UzhWRGJnIiwicmVjb3ZlcnlDb21taXRtZW50IjoiRWlCZk9aZE10VTZPQnc4UGs4NzlRdFotMkotOUZiYmpTWnlvYUFfYnFENHpoQSJ9fQ"
    +      }
    +    ],
    +    "service": [
    +      {
    +        "id": "#service1Id",
    +        "type": "service1Type",
    +        "serviceEndpoint": "http://www.service1.com"
    +      }
    +    ],
    +    "verificationMethod": [
    +      {
    +        "id": "#publicKeyModel1Id",
    +        "controller": "did:sidetree:EiDyOQbbZAa3aiRzeCkV7LOx3SERjjH93EXoIM3UoN4oWg:eyJkZWx0YSI6eyJwYXRjaGVzIjpbeyJhY3Rpb24iOiJyZXBsYWNlIiwiZG9jdW1lbnQiOnsicHVibGljS2V5cyI6W3siaWQiOiJwdWJsaWNLZXlNb2RlbDFJZCIsInB1YmxpY0tleUp3ayI6eyJjcnYiOiJzZWNwMjU2azEiLCJrdHkiOiJFQyIsIngiOiJ0WFNLQl9ydWJYUzdzQ2pYcXVwVkpFelRjVzNNc2ptRXZxMVlwWG45NlpnIiwieSI6ImRPaWNYcWJqRnhvR0otSzAtR0oxa0hZSnFpY19EX09NdVV3a1E3T2w2bmsifSwicHVycG9zZXMiOlsiYXV0aGVudGljYXRpb24iLCJrZXlBZ3JlZW1lbnQiXSwidHlwZSI6IkVjZHNhU2VjcDI1NmsxVmVyaWZpY2F0aW9uS2V5MjAxOSJ9XSwic2VydmljZXMiOlt7ImlkIjoic2VydmljZTFJZCIsInNlcnZpY2VFbmRwb2ludCI6Imh0dHA6Ly93d3cuc2VydmljZTEuY29tIiwidHlwZSI6InNlcnZpY2UxVHlwZSJ9XX19XSwidXBkYXRlQ29tbWl0bWVudCI6IkVpREtJa3dxTzY5SVBHM3BPbEhrZGI4Nm5ZdDBhTnhTSFp1MnItYmhFem5qZEEifSwic3VmZml4RGF0YSI6eyJkZWx0YUhhc2giOiJFaUNmRFdSbllsY0Q5RUdBM2RfNVoxQUh1LWlZcU1iSjluZmlxZHo1UzhWRGJnIiwicmVjb3ZlcnlDb21taXRtZW50IjoiRWlCZk9aZE10VTZPQnc4UGs4NzlRdFotMkotOUZiYmpTWnlvYUFfYnFENHpoQSJ9fQ",
    +        "type": "EcdsaSecp256k1VerificationKey2019",
    +        "publicKeyJwk": {
    +          "crv": "secp256k1",
    +          "kty": "EC",
    +          "x": "tXSKB_rubXS7sCjXqupVJEzTcW3MsjmEvq1YpXn96Zg",
    +          "y": "dOicXqbjFxoGJ-K0-GJ1kHYJqic_D_OMuUwkQ7Ol6nk"
    +        }
    +      }
    +    ],
    +    "authentication": [
    +      "#publicKeyModel1Id"
    +    ],
    +    "keyAgreement": [
    +      "#publicKeyModel1Id"
    +    ]
    +  },
    +  "didDocumentMetadata": {
    +    "equivalentId": ["did:sidetree:EiDyOQbbZAa3aiRzeCkV7LOx3SERjjH93EXoIM3UoN4oWg"],
    +    "method": {
    +      "published": false,
    +      "recoveryCommitment": "EiBfOZdMtU6OBw8Pk879QtZ-2J-9FbbjSZyoaA_bqD4zhA",
    +      "updateCommitment": "EiDKIkwqO69IPG3pOlHkdb86nYt0aNxSHZu2r-bhEznjdA"
    +    }
    +  }
    +}
    +
    +

    § Acknowledgements

    +

    Transmute received funding from the United States Department of Homeland Security’s (US DHS) Silicon Valley Innovation Program to contribute to this work item under contracts 70RSAT20T00000003, and 70RSAT20T00000033. This work item does not necessarily reflect the position or the policy of the U.S. Government and no official endorsement should be inferred. DIF is not lending or implying support/affiliation with the outside entity as a part of the acknowledgement.

    + +
    + +
    + + + +
    + + + + + +
    +
      +
      + + +
      + Table of Contents + +
      + +
      + +
      + + + + + + + \ No newline at end of file