From 623f3a9709fc6d61b13f8887274314ec8410b8ac Mon Sep 17 00:00:00 2001 From: lmuswere Date: Tue, 2 Jun 2020 13:40:52 +0100 Subject: [PATCH] Private data benchmarks Signed-off-by: lmuswere --- .../contract/create-private-asset.yaml | 130 ++++ .../couchDB/contract/get-private-asset.yaml | 153 ++++ .../contract/create-private-asset.yaml | 130 ++++ .../levelDB/contract/get-private-asset.yaml | 154 ++++ .../api/fabric/lib/create-private-asset.js | 54 ++ .../api/fabric/lib/get-private-asset.js | 59 ++ benchmarks/api/fabric/lib/helper.js | 20 +- networks/fabric/ansible-playbooks/README.md | 61 ++ .../ansible-playbooks/v1/2org1peer/.gitignore | 9 + .../v1/2org1peer/collections-config.json | 10 + .../v1/2org1peer/fixed-asset@1.0.0.cds | Bin 0 -> 2862 bytes .../v1/2org1peer/playbook.yml | 189 +++++ .../v1/2org1peer/requirements.yml | 5 + .../ansible-playbooks/v2/2org1peer/.gitignore | 9 + .../v2/2org1peer/collections-config.json | 10 + .../ansible-playbooks/v2/2org1peer/core.yaml | 703 ++++++++++++++++++ .../v2/2org1peer/fixed-asset@1.0.0.tar.gz | Bin 0 -> 2968 bytes .../v2/2org1peer/playbook.yml | 190 +++++ .../v2/2org1peer/requirements.yml | 5 + .../api/fixed-asset/node/lib/fixed-asset.js | 79 ++ 20 files changed, 1965 insertions(+), 5 deletions(-) create mode 100644 benchmarks/api/fabric/couchDB/contract/create-private-asset.yaml create mode 100644 benchmarks/api/fabric/couchDB/contract/get-private-asset.yaml create mode 100644 benchmarks/api/fabric/levelDB/contract/create-private-asset.yaml create mode 100644 benchmarks/api/fabric/levelDB/contract/get-private-asset.yaml create mode 100644 benchmarks/api/fabric/lib/create-private-asset.js create mode 100644 benchmarks/api/fabric/lib/get-private-asset.js create mode 100644 networks/fabric/ansible-playbooks/README.md create mode 100644 networks/fabric/ansible-playbooks/v1/2org1peer/.gitignore create mode 100644 networks/fabric/ansible-playbooks/v1/2org1peer/collections-config.json create mode 100644 networks/fabric/ansible-playbooks/v1/2org1peer/fixed-asset@1.0.0.cds create mode 100644 networks/fabric/ansible-playbooks/v1/2org1peer/playbook.yml create mode 100644 networks/fabric/ansible-playbooks/v1/2org1peer/requirements.yml create mode 100644 networks/fabric/ansible-playbooks/v2/2org1peer/.gitignore create mode 100644 networks/fabric/ansible-playbooks/v2/2org1peer/collections-config.json create mode 100644 networks/fabric/ansible-playbooks/v2/2org1peer/core.yaml create mode 100644 networks/fabric/ansible-playbooks/v2/2org1peer/fixed-asset@1.0.0.tar.gz create mode 100644 networks/fabric/ansible-playbooks/v2/2org1peer/playbook.yml create mode 100644 networks/fabric/ansible-playbooks/v2/2org1peer/requirements.yml diff --git a/benchmarks/api/fabric/couchDB/contract/create-private-asset.yaml b/benchmarks/api/fabric/couchDB/contract/create-private-asset.yaml new file mode 100644 index 000000000..0a0661d57 --- /dev/null +++ b/benchmarks/api/fabric/couchDB/contract/create-private-asset.yaml @@ -0,0 +1,130 @@ +--- +test: + name: create-private-asset-size-ramp-couchDB + description: This is a duration based benchmark targeting a Hyperledger Fabric network with a CouchDB world state database using the `fixed-asset` NodeJS chaincode contract that is interacted with via + a Fabric-SDK-Node Gateway. Each test round invokes the `createPrivateAsset` method, with successive rounds increasing the bytesize of the asset + added into the Private data store. 
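Note: the fixed-asset chaincode changes listed in the diffstat above (`chaincodes/api/fixed-asset/node/lib/fixed-asset.js`) are not reproduced in this section. Purely as a hedged orientation, the `createPrivateAsset` and `getPrivateAsset` methods that these rounds invoke are expected to read the asset from transient data and write/read it against the private data collection defined in `collections-config.json`. The sketch below is illustrative only (it assumes the fabric-contract-api style and the `CollectionOne` collection name) and is not the code added by this PR.

```javascript
// Illustrative sketch only -- not the fixed-asset contract code from this PR.
// Assumes fabric-contract-api and the 'CollectionOne' collection from collections-config.json.
const { Contract } = require('fabric-contract-api');

class FixedAssetPrivateSketch extends Contract {

    // The asset payload arrives as transient data so that it never appears
    // on the public channel ledger; only the key is passed as an argument.
    async createPrivateAsset(ctx, uuid) {
        const transientMap = ctx.stub.getTransient();
        const content = transientMap.get('content');
        await ctx.stub.putPrivateData('CollectionOne', uuid, content);
    }

    // Reads a previously stored private asset back from the collection.
    async getPrivateAsset(ctx, uuid) {
        const data = await ctx.stub.getPrivateData('CollectionOne', uuid);
        return data.toString();
    }
}

module.exports = FixedAssetPrivateSketch;
```

The workload modules added later in this patch drive exactly this shape of call: `createPrivateAsset` via `submitTransaction()` with a transient `content` field, and `getPrivateAsset` via `evaluateTransaction()` when `consensus` is false.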
+ workers: + type: local + number: 5 + rounds: + - label: create-private-asset-8000-fixed-tps + description: Test a submitTransaction() Gateway method against the NodeJS `fixed-asset` Smart Contract method named `createPrivateAsset`, which inserts an asset of size 8000 bytes into the Private data store at a fixed TPS rate. + chaincodeID: fixed-asset + txDuration: 300 + rateControl: { type: fixed-rate, opts: { tps: 15} } + arguments: + chaincodeID: fixed-asset + bytesize: 8000 + callback: benchmarks/api/fabric/lib/create-private-asset.js + - label: create-private-asset-100 + description: Test a submitTransaction() Gateway method against the NodeJS `fixed-asset` Smart Contract method named `createPrivateAsset`, which inserts an asset of size 100 bytes into the Private data store. + chaincodeID: fixed-asset + txDuration: 300 + rateControl: { type: fixed-backlog, opts: { unfinished_per_client: 10, startingTps: 1} } + arguments: + chaincodeID: fixed-asset + bytesize: 100 + callback: benchmarks/api/fabric/lib/create-private-asset.js + - label: create-private-asset-200 + description: Test a submitTransaction() Gateway method against the NodeJS `fixed-asset` Smart Contract method named `createPrivateAsset`, which inserts an asset of size 200 bytes into the Private data store. + chaincodeID: fixed-asset + txDuration: 300 + rateControl: { type: fixed-backlog, opts: { unfinished_per_client: 10, startingTps: 1} } + arguments: + chaincodeID: fixed-asset + bytesize: 200 + callback: benchmarks/api/fabric/lib/create-private-asset.js + - label: create-private-asset-500 + description: Test a submitTransaction() Gateway method against the NodeJS `fixed-asset` Smart Contract method named `createPrivateAsset`, which inserts an asset of size 500 bytes into the Private data store. + chaincodeID: fixed-asset + txDuration: 300 + rateControl: { type: fixed-backlog, opts: { unfinished_per_client: 10, startingTps: 1} } + arguments: + chaincodeID: fixed-asset + bytesize: 500 + callback: benchmarks/api/fabric/lib/create-private-asset.js + - label: create-private-asset-1000 + description: Test a submitTransaction() Gateway method against the NodeJS `fixed-asset` Smart Contract method named `createPrivateAsset`, which inserts an asset of size 1000 bytes into the Private data store. + chaincodeID: fixed-asset + txDuration: 300 + rateControl: { type: fixed-backlog, opts: { unfinished_per_client: 10, startingTps: 1} } + arguments: + chaincodeID: fixed-asset + bytesize: 1000 + callback: benchmarks/api/fabric/lib/create-private-asset.js + - label: create-private-asset-2000 + description: Test a submitTransaction() Gateway method against the NodeJS `fixed-asset` Smart Contract method named `createPrivateAsset`, which inserts an asset of size 2000 bytes into the Private data store. + chaincodeID: fixed-asset + txDuration: 300 + rateControl: { type: fixed-backlog, opts: { unfinished_per_client: 10, startingTps: 1} } + arguments: + chaincodeID: fixed-asset + bytesize: 2000 + callback: benchmarks/api/fabric/lib/create-private-asset.js + - label: create-private-asset-5000 + description: Test a submitTransaction() Gateway method against the NodeJS `fixed-asset` Smart Contract method named `createPrivateAsset`, which inserts an asset of size 5000 bytes into the Private data store. 
+ chaincodeID: fixed-asset + txDuration: 300 + rateControl: { type: fixed-backlog, opts: { unfinished_per_client: 10, startingTps: 1} } + arguments: + chaincodeID: fixed-asset + bytesize: 5000 + callback: benchmarks/api/fabric/lib/create-private-asset.js + - label: create-private-asset-10000 + description: Test a submitTransaction() Gateway method against the NodeJS `fixed-asset` Smart Contract method named `createPrivateAsset`, which inserts an asset of size 10000 bytes into the Private data store. + chaincodeID: fixed-asset + txDuration: 300 + rateControl: { type: fixed-backlog, opts: { unfinished_per_client: 10, startingTps: 1} } + arguments: + chaincodeID: fixed-asset + bytesize: 10000 + callback: benchmarks/api/fabric/lib/create-private-asset.js +monitor: + type: + - prometheus + prometheus: + url: "http://localhost:9090" + push_url: "http://localhost:9091" + metrics: + ignore: [prometheus, pushGateway, cadvisor, grafana, node-exporter, ca.org1.example.com, ca.org2.example.com] + include: + Avg Memory (MB): + query: sum(container_memory_rss{name=~".+"}) by (name) + step: 10 + label: name + statistic: avg + multiplier: 0.000001 + CPU (%): + query: sum(rate(container_cpu_usage_seconds_total{name=~".+"}[1m])) by (name) + step: 10 + label: name + statistic: avg + multiplier: 100 + Network In (MB): + query: sum(rate(container_network_receive_bytes_total{name=~".+"}[1m])) by (name) + step: 10 + label: name + statistic: sum + multiplier: 0.000001 + Network Out (MB): + query: sum(rate(container_network_transmit_bytes_total{name=~".+"}[1m])) by (name) + step: 10 + label: name + statistic: sum + multiplier: 0.000001 + Disc Write (MB): + query: sum(rate(container_fs_writes_bytes_total{name=~".+"}[1m])) by (name) + step: 10 + label: name + statistic: sum + multiplier: 0.000001 + Disc Read (MB): + query: sum(rate(container_fs_reads_bytes_total{name=~".+"}[1m])) by (name) + step: 10 + label: name + statistic: sum + multiplier: 0.000001 +observer: + type: prometheus + interval: 10 diff --git a/benchmarks/api/fabric/couchDB/contract/get-private-asset.yaml b/benchmarks/api/fabric/couchDB/contract/get-private-asset.yaml new file mode 100644 index 000000000..60e0f66fc --- /dev/null +++ b/benchmarks/api/fabric/couchDB/contract/get-private-asset.yaml @@ -0,0 +1,153 @@ +--- +test: + name: get-private-asset-ramp-couchDB + description: This is a duration based benchmark targeting a Hyperledger Fabric network with a CouchDB world state database using the `fixed-asset` NodeJS chaincode contract that is interacted with via + a Fabric-SDK-Node Gateway. Each test round invokes the 'getPrivateAsset()' API method. Successive rounds create and retrieve assets of larger bytesize. + workers: + type: local + number: 10 + rounds: + - label: get-private-asset-evaluate-100 + description: Test an evaluateTransaction() Gateway method against the NodeJS `fixed-asset` Smart Contract method named `getPrivateAsset`. This method performs a getState on an item that matches an asset of size 100 bytes. + chaincodeID: fixed-asset + txDuration: 300 + rateControl: { type: fixed-backlog, opts: { unfinished_per_client: 50 } } + arguments: + chaincodeID: fixed-asset + create_sizes: [100,200,500,1000,2000,5000,10000] + assets: 1000 + bytesize: 100 + consensus: false + callback: benchmarks/api/fabric/lib/get-private-asset.js + - label: get-private-asset-evaluate-200 + description: Test an evaluateTransaction() Gateway method against the NodeJS `fixed-asset` Smart Contract method named `getPrivateAsset`. 
This method performs a getState on an item that matches an asset of size 200 bytes. + chaincodeID: fixed-asset + txDuration: 300 + rateControl: { type: fixed-backlog, opts: { unfinished_per_client: 50 } } + arguments: + chaincodeID: fixed-asset + nosetup: true + assets: 1000 + bytesize: 200 + consensus: false + callback: benchmarks/api/fabric/lib/get-private-asset.js + - label: get-private-asset-evaluate-500 + description: Test an evaluateTransaction() Gateway method against the NodeJS `fixed-asset` Smart Contract method named `getPrivateAsset`. This method performs a getState on an item that matches an asset of size 500 bytes. + chaincodeID: fixed-asset + txDuration: 300 + rateControl: { type: fixed-backlog, opts: { unfinished_per_client: 50 } } + arguments: + chaincodeID: fixed-asset + nosetup: true + bytesize: 500 + assets: 1000 + consensus: false + callback: benchmarks/api/fabric/lib/get-private-asset.js + - label: get-private-asset-evaluate-1000 + description: Test an evaluateTransaction() Gateway method against the NodeJS `fixed-asset` Smart Contract method named `getPrivateAsset`. This method performs a getState on an item that matches an asset of size 1000 bytes. + chaincodeID: fixed-asset + txDuration: 300 + rateControl: { type: fixed-backlog, opts: { unfinished_per_client: 50 } } + arguments: + chaincodeID: fixed-asset + nosetup: true + bytesize: 1000 + assets: 1000 + consensus: false + callback: benchmarks/api/fabric/lib/get-private-asset.js + - label: get-private-asset-evaluate-2000 + description: Test an evaluateTransaction() Gateway method against the NodeJS `fixed-asset` Smart Contract method named `getPrivateAsset`. This method performs a getState on an item that matches an asset of size 2000 bytes. + chaincodeID: fixed-asset + txDuration: 300 + rateControl: { type: fixed-backlog, opts: { unfinished_per_client: 50 } } + arguments: + chaincodeID: fixed-asset + nosetup: true + bytesize: 2000 + assets: 1000 + consensus: false + callback: benchmarks/api/fabric/lib/get-private-asset.js + - label: get-private-asset-evaluate-5000 + description: Test an evaluateTransaction() Gateway method against the NodeJS `fixed-asset` Smart Contract method named `getPrivateAsset`. This method performs a getState on an item that matches an asset of size 5000 bytes. + chaincodeID: fixed-asset + txDuration: 300 + rateControl: { type: fixed-backlog, opts: { unfinished_per_client: 50 } } + arguments: + chaincodeID: fixed-asset + nosetup: true + bytesize: 5000 + assets: 1000 + consensus: false + callback: benchmarks/api/fabric/lib/get-private-asset.js + - label: get-private-asset-evaluate-10000 + description: Test an evaluateTransaction() Gateway method against the NodeJS `fixed-asset` Smart Contract method named `getPrivateAsset`. This method performs a getState on an item that matches an asset of size 10000 bytes. + chaincodeID: fixed-asset + txDuration: 300 + rateControl: { type: fixed-backlog, opts: { unfinished_per_client: 50 } } + arguments: + chaincodeID: fixed-asset + nosetup: true + bytesize: 10000 + assets: 1000 + consensus: false + callback: benchmarks/api/fabric/lib/get-private-asset.js + - label: get-private-asset-evaluate-8000-fixed-tps + description: Test an evaluateTransaction() Gateway method against the NodeJS `fixed-asset` Smart Contract method named `getPrivateAsset`. This method performs a getState on an item that matches an asset of size 8000 bytes at a fixed TPS. 
+ chaincodeID: fixed-asset + txDuration: 300 + rateControl: { type: fixed-rate, opts: { tps: 350 }} + arguments: + chaincodeID: fixed-asset + nosetup: true + bytesize: 8000 + assets: 1000 + consensus: false + callback: benchmarks/api/fabric/lib/get-private-asset.js +monitor: + type: + - prometheus + prometheus: + url: "http://localhost:9090" + push_url: "http://localhost:9091" + metrics: + ignore: [prometheus, pushGateway, cadvisor, grafana, node-exporter, ca.org1.example.com, ca.org2.example.com] + include: + Avg Memory (MB): + query: sum(container_memory_rss{name=~".+"}) by (name) + step: 10 + label: name + statistic: avg + multiplier: 0.000001 + CPU (%): + query: sum(rate(container_cpu_usage_seconds_total{name=~".+"}[1m])) by (name) + step: 10 + label: name + statistic: avg + multiplier: 100 + Network In (MB): + query: sum(rate(container_network_receive_bytes_total{name=~".+"}[1m])) by (name) + step: 10 + label: name + statistic: sum + multiplier: 0.000001 + Network Out (MB): + query: sum(rate(container_network_transmit_bytes_total{name=~".+"}[1m])) by (name) + step: 10 + label: name + statistic: sum + multiplier: 0.000001 + Disc Write (MB): + query: sum(rate(container_fs_writes_bytes_total{name=~".+"}[1m])) by (name) + step: 10 + label: name + statistic: sum + multiplier: 0.000001 + Disc Read (MB): + query: sum(rate(container_fs_reads_bytes_total{name=~".+"}[1m])) by (name) + step: 10 + label: name + statistic: sum + multiplier: 0.000001 +observer: + type: prometheus + interval: 10 diff --git a/benchmarks/api/fabric/levelDB/contract/create-private-asset.yaml b/benchmarks/api/fabric/levelDB/contract/create-private-asset.yaml new file mode 100644 index 000000000..2377953fe --- /dev/null +++ b/benchmarks/api/fabric/levelDB/contract/create-private-asset.yaml @@ -0,0 +1,130 @@ +--- +test: + name: create-private-asset-size-ramp-levelDB + description: This is a duration based benchmark targeting a Hyperledger Fabric network with a LevelDB world state database using the `fixed-asset` NodeJS chaincode contract that is interacted with via + a Fabric-SDK-Node Gateway. Each test round invokes the `createPrivateAsset` method, with successive rounds increasing the bytesize of the private asset + added into the Private Store + workers: + type: local + number: 10 + rounds: + - label: create-private-asset-8000-fixed-tps + description: Test a submitTransaction() Gateway method against the NodeJS `fixed-asset` Smart Contract method named `createPrivateAsset`, which inserts an asset of size 8000 bytes into the Private Store at a fixed TPS rate. + chaincodeID: fixed-asset + txDuration: 30 + rateControl: { type: fixed-rate, opts: { tps: 15} } + arguments: + chaincodeID: fixed-asset + bytesize: 8000 + callback: benchmarks/api/fabric/lib/create-private-asset.js + - label: create-private-asset-100 + description: Test a submitTransaction() Gateway method against the NodeJS `fixed-asset` Smart Contract method named `createPrivateAsset`, which inserts an asset of size 100 bytes into the Private Store. + chaincodeID: fixed-asset + txDuration: 300 + rateControl: { type: fixed-backlog, opts: { unfinished_per_client: 10 } } + arguments: + chaincodeID: fixed-asset + bytesize: 100 + callback: benchmarks/api/fabric/lib/create-private-asset.js + - label: create-private-asset-200 + description: Test a submitTransaction() Gateway method against the NodeJS `fixed-asset` Smart Contract method named `createPrivateAsset`, which inserts an asset of size 200 bytes into the Private Store. 
+ chaincodeID: fixed-asset + txDuration: 300 + rateControl: { type: fixed-backlog, opts: { unfinished_per_client: 10 } } + arguments: + chaincodeID: fixed-asset + bytesize: 200 + callback: benchmarks/api/fabric/lib/create-private-asset.js + - label: create-private-asset-500 + description: Test a submitTransaction() Gateway method against the NodeJS `fixed-asset` Smart Contract method named `createPrivateAsset`, which inserts an asset of size 500 bytes into the Private Store. + chaincodeID: fixed-asset + txDuration: 300 + rateControl: { type: fixed-backlog, opts: { unfinished_per_client: 10 } } + arguments: + chaincodeID: fixed-asset + bytesize: 500 + callback: benchmarks/api/fabric/lib/create-private-asset.js + - label: create-private-asset-1000 + description: Test a submitTransaction() Gateway method against the NodeJS `fixed-asset` Smart Contract method named `createPrivateAsset`, which inserts an asset of size 1000 bytes into the Private Store. + chaincodeID: fixed-asset + txDuration: 300 + rateControl: { type: fixed-backlog, opts: { unfinished_per_client: 10 } } + arguments: + chaincodeID: fixed-asset + bytesize: 1000 + callback: benchmarks/api/fabric/lib/create-private-asset.js + - label: create-private-asset-2000 + description: Test a submitTransaction() Gateway method against the NodeJS `fixed-asset` Smart Contract method named `createPrivateAsset`, which inserts an asset of size 2000 bytes into the Private Store. + chaincodeID: fixed-asset + txDuration: 300 + rateControl: { type: fixed-backlog, opts: { unfinished_per_client: 10 } } + arguments: + chaincodeID: fixed-asset + bytesize: 2000 + callback: benchmarks/api/fabric/lib/create-private-asset.js + - label: create-private-asset-5000 + description: Test a submitTransaction() Gateway method against the NodeJS `fixed-asset` Smart Contract method named `createPrivateAsset`, which inserts an asset of size 5000 bytes into the Private Store. + chaincodeID: fixed-asset + txDuration: 300 + rateControl: { type: fixed-backlog, opts: { unfinished_per_client: 10 } } + arguments: + chaincodeID: fixed-asset + bytesize: 5000 + callback: benchmarks/api/fabric/lib/create-private-asset.js + - label: create-private-asset-10000 + description: Test a submitTransaction() Gateway method against the NodeJS `fixed-asset` Smart Contract method named `createPrivateAsset`, which inserts an asset of size 10000 bytes into the Private Store. 
+ chaincodeID: fixed-asset + txDuration: 300 + rateControl: { type: fixed-backlog, opts: { unfinished_per_client: 10 } } + arguments: + chaincodeID: fixed-asset + bytesize: 10000 + callback: benchmarks/api/fabric/lib/create-private-asset.js +monitor: + type: + - prometheus + prometheus: + url: "http://localhost:9090" + push_url: "http://localhost:9091" + metrics: + ignore: [prometheus, pushGateway, cadvisor, grafana, node-exporter, ca.org1.example.com, ca.org2.example.com] + include: + Avg Memory (MB): + query: sum(container_memory_rss{name=~".+"}) by (name) + step: 10 + label: name + statistic: avg + multiplier: 0.000001 + CPU (%): + query: sum(rate(container_cpu_usage_seconds_total{name=~".+"}[1m])) by (name) + step: 10 + label: name + statistic: avg + multiplier: 100 + Network In (MB): + query: sum(rate(container_network_receive_bytes_total{name=~".+"}[1m])) by (name) + step: 10 + label: name + statistic: sum + multiplier: 0.000001 + Network Out (MB): + query: sum(rate(container_network_transmit_bytes_total{name=~".+"}[1m])) by (name) + step: 10 + label: name + statistic: sum + multiplier: 0.000001 + Disc Write (MB): + query: sum(rate(container_fs_writes_bytes_total{name=~".+"}[1m])) by (name) + step: 10 + label: name + statistic: sum + multiplier: 0.000001 + Disc Read (MB): + query: sum(rate(container_fs_reads_bytes_total{name=~".+"}[1m])) by (name) + step: 10 + label: name + statistic: sum + multiplier: 0.000001 +observer: + type: prometheus + interval: 10 diff --git a/benchmarks/api/fabric/levelDB/contract/get-private-asset.yaml b/benchmarks/api/fabric/levelDB/contract/get-private-asset.yaml new file mode 100644 index 000000000..230f1d4cf --- /dev/null +++ b/benchmarks/api/fabric/levelDB/contract/get-private-asset.yaml @@ -0,0 +1,154 @@ +--- +test: + name: get-private-asset-ramp-levelDB + description: This is a duration based benchmark targeting a Hyperledger Fabric network with a LevelDB world state database using the `fixed-asset` NodeJS chaincode contract that is interacted with via + a Fabric-SDK-Node Gateway. Each test round invokes the 'getPrivateAsset()' API method. Successive rounds create and retrieve assets of larger bytesize. + workers: + type: local + number: 10 + rounds: + - label: get-private-asset-evaluate-100 + description: Test an evaluateTransaction() Gateway method against the NodeJS `fixed-asset` Smart Contract method named `getPrivateAsset`. This method performs a getPrivateData on an item that matches an asset of size 100 bytes. + chaincodeID: fixed-asset + txDuration: 300 + rateControl: { type: fixed-backlog, opts: { unfinished_per_client: 50 } } + arguments: + chaincodeID: fixed-asset + create_sizes: [100,200,500,1000,2000,5000,10000] + assets: 1000 + bytesize: 100 + uuid: '500' + consensus: false + callback: benchmarks/api/fabric/lib/get-private-asset.js + - label: get-private-asset-evaluate-200 + description: Test an evaluateTransaction() Gateway method against the NodeJS `fixed-asset` Smart Contract method named `getPrivateAsset`. This method performs a getPrivateData on an item that matches an asset of size 200 bytes. 
+ chaincodeID: fixed-asset + txDuration: 300 + rateControl: { type: fixed-backlog, opts: { unfinished_per_client: 50 } } + arguments: + chaincodeID: fixed-asset + nosetup: true + bytesize: 200 + uuid: '500' + consensus: false + callback: benchmarks/api/fabric/lib/get-private-asset.js + - label: get-private-asset-evaluate-500 + description: Test an evaluateTransaction() Gateway method against the NodeJS `fixed-asset` Smart Contract method named `getPrivateAsset`. This method performs a getPrivateData on an item that matches an asset of size 500 bytes. + chaincodeID: fixed-asset + txDuration: 300 + rateControl: { type: fixed-backlog, opts: { unfinished_per_client: 50 } } + arguments: + chaincodeID: fixed-asset + nosetup: true + bytesize: 500 + uuid: '500' + consensus: false + callback: benchmarks/api/fabric/lib/get-private-asset.js + - label: get-private-asset-evaluate-1000 + description: Test an evaluateTransaction() Gateway method against the NodeJS `fixed-asset` Smart Contract method named `getPrivateAsset`. This method performs a getPrivateData on an item that matches an asset of size 1000 bytes. + chaincodeID: fixed-asset + txDuration: 300 + rateControl: { type: fixed-backlog, opts: { unfinished_per_client: 50 } } + arguments: + chaincodeID: fixed-asset + nosetup: true + bytesize: 1000 + uuid: '500' + consensus: false + callback: benchmarks/api/fabric/lib/get-private-asset.js + - label: get-private-asset-evaluate-2000 + description: Test an evaluateTransaction() Gateway method against the NodeJS `fixed-asset` Smart Contract method named `getPrivateAsset`. This method performs a getPrivateData on an item that matches an asset of size 2000 bytes. + chaincodeID: fixed-asset + txDuration: 300 + rateControl: { type: fixed-backlog, opts: { unfinished_per_client: 50 } } + arguments: + chaincodeID: fixed-asset + nosetup: true + bytesize: 2000 + uuid: '500' + consensus: false + callback: benchmarks/api/fabric/lib/get-private-asset.js + - label: get-private-asset-evaluate-5000 + description: Test an evaluateTransaction() Gateway method against the NodeJS `fixed-asset` Smart Contract method named `getPrivateAsset`. This method performs a getPrivateData on an item that matches an asset of size 5000 bytes. + chaincodeID: fixed-asset + txDuration: 300 + rateControl: { type: fixed-backlog, opts: { unfinished_per_client: 50 } } + arguments: + chaincodeID: fixed-asset + nosetup: true + bytesize: 5000 + uuid: '500' + consensus: false + callback: benchmarks/api/fabric/lib/get-private-asset.js + - label: get-private-asset-evaluate-10000 + description: Test an evaluateTransaction() Gateway method against the NodeJS `fixed-asset` Smart Contract method named `getPrivateAsset`. This method performs a getPrivateData on an item that matches an asset of size 10000 bytes. + chaincodeID: fixed-asset + txDuration: 300 + rateControl: { type: fixed-backlog, opts: { unfinished_per_client: 50 } } + arguments: + chaincodeID: fixed-asset + nosetup: true + bytesize: 10000 + uuid: '500' + consensus: false + callback: benchmarks/api/fabric/lib/get-private-asset.js + - label: get-private-asset-evaluate-8000-fixed-tps + description: Test an evaluateTransaction() Gateway method against the NodeJS `fixed-asset` Smart Contract method named `getPrivateAsset`. This method performs a getPrivateData on an item that matches an asset of size 8000 bytes at a fixed TPS. 
+ chaincodeID: fixed-asset + txDuration: 300 + rateControl: { type: fixed-rate, opts: { tps: 350 }} + arguments: + chaincodeID: fixed-asset + nosetup: true + bytesize: 8000 + assets: 1000 + consensus: false + callback: benchmarks/api/fabric/lib/get-private-asset.js +monitor: + type: + - prometheus + prometheus: + url: "http://localhost:9090" + push_url: "http://localhost:9091" + metrics: + ignore: [prometheus, pushGateway, cadvisor, grafana, node-exporter, ca.org1.example.com, ca.org2.example.com] + include: + Avg Memory (MB): + query: sum(container_memory_rss{name=~".+"}) by (name) + step: 10 + label: name + statistic: avg + multiplier: 0.000001 + CPU (%): + query: sum(rate(container_cpu_usage_seconds_total{name=~".+"}[1m])) by (name) + step: 10 + label: name + statistic: avg + multiplier: 100 + Network In (MB): + query: sum(rate(container_network_receive_bytes_total{name=~".+"}[1m])) by (name) + step: 10 + label: name + statistic: sum + multiplier: 0.000001 + Network Out (MB): + query: sum(rate(container_network_transmit_bytes_total{name=~".+"}[1m])) by (name) + step: 10 + label: name + statistic: sum + multiplier: 0.000001 + Disc Write (MB): + query: sum(rate(container_fs_writes_bytes_total{name=~".+"}[1m])) by (name) + step: 10 + label: name + statistic: sum + multiplier: 0.000001 + Disc Read (MB): + query: sum(rate(container_fs_reads_bytes_total{name=~".+"}[1m])) by (name) + step: 10 + label: name + statistic: sum + multiplier: 0.000001 +observer: + type: prometheus + interval: 10 diff --git a/benchmarks/api/fabric/lib/create-private-asset.js b/benchmarks/api/fabric/lib/create-private-asset.js new file mode 100644 index 000000000..71e611543 --- /dev/null +++ b/benchmarks/api/fabric/lib/create-private-asset.js @@ -0,0 +1,54 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + */ + +'use strict'; + +module.exports.info = 'Creating private Asset in Registry'; + +const chaincodeID = 'fixed-asset'; +const bytes = (s) => { + return ~-encodeURI(s).split(/%..|./).length; +}; +let txIndex = 0; +let clientIdx; +let asset = {docType: chaincodeID, content: ''}; +let bc, contx, bytesize; + +module.exports.init = async function(blockchain, context, args) { + bc = blockchain; + contx = context; + clientIdx = context.clientIdx; + bytesize = args.bytesize; + + asset.creator = 'client' + clientIdx; + asset.bytesize = bytesize; + + const rand = 'random'; + let idx = 0; + while (bytes(JSON.stringify(asset)) < bytesize) { + const letter = rand.charAt(idx); + idx = idx >= rand.length ? 
0 : idx+1; + asset.content = asset.content + letter; + } + + contx = context; +} + +module.exports.run = function() { + const uuid = 'client' + clientIdx + '_' + bytesize + '_' + txIndex; + asset.uuid = uuid; + txIndex++; + + const myArgs = { + chaincodeFunction: 'createPrivateAsset', + chaincodeArguments: [uuid], + transientData: {content: JSON.stringify(asset)} + } + + return bc.bcObj.invokeSmartContract(contx, chaincodeID, undefined, myArgs); +} + +module.exports.end = function() { + return Promise.resolve(); +}; diff --git a/benchmarks/api/fabric/lib/get-private-asset.js b/benchmarks/api/fabric/lib/get-private-asset.js new file mode 100644 index 000000000..ec70e7c4f --- /dev/null +++ b/benchmarks/api/fabric/lib/get-private-asset.js @@ -0,0 +1,59 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + */ + +'use strict'; + +const helper = require('./helper'); + +module.exports.info = 'Get private Asset of fixed size.'; + +const chaincodeID = 'fixed-asset'; +let clientIdx, assets, bytesize, consensus; +let bc, contx; + +module.exports.init = async function(blockchain, context, args) { + bc = blockchain; + contx = context; + clientIdx = context.clientIdx; + + contx = context; + + assets = args.assets ? parseInt(args.assets) : 0; + + bytesize = args.bytesize; + consensus = args.consensus ? (args.consensus === 'true' || args.consensus === true): false; + const nosetup = args.nosetup ? (args.nosetup === 'true' || args.nosetup === true) : false; + + if (nosetup) { + console.log(' -> Skipping asset creation stage'); + } else { + console.log(' -> Entering asset creation stage'); + await helper.addBatchAssets(bc.bcObj, contx, clientIdx, args, true); + console.log(' -> Test asset creation complete'); + } + + return Promise.resolve(); +}; + +module.exports.run = function() { + // Create argument array [functionName(String), otherArgs(String)] + const uuid = Math.floor(Math.random() * Math.floor(assets)); + const itemKey = 'client' + clientIdx + '_' + bytesize + '_' + uuid; + + const myArgs = { + chaincodeFunction: 'getPrivateAsset', + chaincodeArguments: [itemKey] + }; + + // consensus or non-con query + if (consensus) { + return bc.bcObj.invokeSmartContract(contx, chaincodeID, undefined, myArgs); + } else { + return bc.bcObj.querySmartContract(contx, chaincodeID, undefined, myArgs); + } +}; + +module.exports.end = function() { + return Promise.resolve(); +}; diff --git a/benchmarks/api/fabric/lib/helper.js b/benchmarks/api/fabric/lib/helper.js index 2344977a2..e94f0911a 100644 --- a/benchmarks/api/fabric/lib/helper.js +++ b/benchmarks/api/fabric/lib/helper.js @@ -30,7 +30,7 @@ module.exports.retrieveRandomAssetIds = function(assetNumber) { * @param {Integer} clientIdx the client index * @param {Object} args the client arguments */ -module.exports.addBatchAssets = async function(bcObj, context, clientIdx, args) { +module.exports.addBatchAssets = async function(bcObj, context, clientIdx, args, isPrivateData = false) { console.log(' -> Creating assets of sizes: ', args.create_sizes); const testAssetNum = args.assets ? 
parseInt(args.assets) : 0; @@ -83,10 +83,20 @@ module.exports.addBatchAssets = async function(bcObj, context, clientIdx, args) for (const index in batches){ const batch = batches[index]; try { - const myArgs = { - chaincodeFunction: 'createAssetsFromBatch', - chaincodeArguments: [JSON.stringify(batch)] - }; + let myArgs; + if(!isPrivateData) { + myArgs = { + chaincodeFunction: 'createAssetsFromBatch', + chaincodeArguments: [JSON.stringify(batch)] + }; + } else { + myArgs = { + chaincodeFunction: 'createPrivateAssetsFromBatch', + chaincodeArguments: ['50'], + transientData: {content: JSON.stringify(batch)} + } + } + await bcObj.invokeSmartContract(context, 'fixed-asset', undefined, myArgs, undefined, false); } catch (err) { console.error('Error: ', err); diff --git a/networks/fabric/ansible-playbooks/README.md b/networks/fabric/ansible-playbooks/README.md new file mode 100644 index 000000000..eb8ce8f2c --- /dev/null +++ b/networks/fabric/ansible-playbooks/README.md @@ -0,0 +1,61 @@ +# 2Org1Peer Ansible Playbook + +An Ansible playbook for building a Hyperledger Fabric network with two organizations, Org1 and Org2, each with one peer. The peers are configured with a single channel, channel1. The fixed-asset contract is instantiated on this channel, with an endorsement policy stating that both organizations must endorse any transactions. The channel is also instantiated with a private data configuration file. + +## Pre-requisites +Before you can run this Ansible playbook, you need to make sure that you have all of the pre-requisites installed. + +1. You first need to install the Ansible role which is provided as part of the IBM Blockchain Platform and enables you to automate the building of Hyperledger Fabric networks. To do this use the following command in your terminal: +``` +ansible-galaxy install ibm.blockchain_platform_manager +``` + +2. The Ansible role requires the following pre-requisites: + - Python 3.7+ + - https://www.python.org/downloads/ + - Ansible 2.8+ + - `pip install ansible` + - Hyperledger Fabric v1.4 binaries (`configtxgen`, `peer`, `fabric-ca-client`, etc) + - https://hyperledger-fabric.readthedocs.io/en/release-1.4/install.html + - One of the following supported deployment targets: + - IBM Blockchain Platform on IBM Cloud + - IBM Blockchain Platform on Red Hat OpenShift + - Docker 19.03+ + - Docker SDK for Python (if using Docker) + - `pip install docker` + - `jq` + - https://stedolan.github.io/jq/download/ + - `sponge` + - `apt-get install moreutils` (Ubuntu) + - `brew install moreutils` (macOS) + + +3. From this directory in your terminal, run the following command to install all required roles, including the `ibm.blockchain_platform_manager` role, from Ansible Galaxy: + ``` + ansible-galaxy install -r requirements.yml --force + ``` + +## Run the Ansible playbook +To run the ansible playbook, use the following command from this playbook directory +``` +ansible-playbook playbook.yml +``` + +To teardown the Ansible generated Fabric network and remove all associated files, use the following command +``` +ansible-playbook --extra-vars state=absent playbook.yml +``` + +## Next steps +To use this network for a benchmark run, you'll also need to set up a network configuration file based on this Ansible generated Fabric network. + +Below is a list of some of the things that will need to be included in the network configuration file and where you can obtain them from the playbook.yml and from the files generated by the playbook. 
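For illustration, a single peer entry built from those properties might look like the sketch below (expressed as a JavaScript object; the port, hostname, and certificate path are the Org1 defaults taken from `playbook.yml` in this PR and are assumptions if you have changed them):

```javascript
// Hedged sketch of one peer entry for a Caliper network configuration file,
// using the Org1 defaults from playbook.yml; adjust if your ports or paths differ.
const peer0Org1 = {
    url: 'grpcs://localhost:18051',             // docker: port for Org1Peer1
    hostnameOverride: 'peer0.org1.example.com', // docker: hostname for Org1Peer1
    tlsCACerts: {
        path: 'networks/fabric/ansible-playbooks/2org1peer/wallets/Org1/ca-tls-root.pem'
    }
};
// Org1's mspid in the same file would be 'Org1MSP' (msp: id in playbook.yml).
```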
+ +| Property | Where to find it | +|---|---| +| url | This will be in the format of `grpcs://localhost:port`. You can find the port number in the playbook.yml file under `docker: port` | +| hostnameOverride | You can find the port number in the playbook.yml file under `docker: hostname` | +| mspid | You can find this in the playbook.yml file under `msp: id` | +| tlsCACerts | The tlsCACerts is in one of the files generated by the playbook. The path to the file for Org1 should be `networks/fabric/ansible-playbooks/2org1peer/wallets/Org1/ca-tls-root.pem` | + +You can find more information on setting up a Fabric network configuration file and the other properties you will need to include on the [Hyperledger Caliper](https://hyperledger.github.io/caliper/v0.3.1/fabric-config/) website. diff --git a/networks/fabric/ansible-playbooks/v1/2org1peer/.gitignore b/networks/fabric/ansible-playbooks/v1/2org1peer/.gitignore new file mode 100644 index 000000000..d0b8cc399 --- /dev/null +++ b/networks/fabric/ansible-playbooks/v1/2org1peer/.gitignore @@ -0,0 +1,9 @@ +# +# SPDX-License-Identifier: Apache-2.0 +# + +gateways +nodes +wallets +!**/.gitkeep +service-creds.json \ No newline at end of file diff --git a/networks/fabric/ansible-playbooks/v1/2org1peer/collections-config.json b/networks/fabric/ansible-playbooks/v1/2org1peer/collections-config.json new file mode 100644 index 000000000..37aa0bcaf --- /dev/null +++ b/networks/fabric/ansible-playbooks/v1/2org1peer/collections-config.json @@ -0,0 +1,10 @@ +[ + { + "name": "CollectionOne", + "policy": "OR('Org1MSP.member')", + "requiredPeerCount": 0, + "maxPeerCount": 1, + "blockToLive": 0, + "memberOnlyRead": false + } +] \ No newline at end of file diff --git a/networks/fabric/ansible-playbooks/v1/2org1peer/fixed-asset@1.0.0.cds b/networks/fabric/ansible-playbooks/v1/2org1peer/fixed-asset@1.0.0.cds new file mode 100644 index 0000000000000000000000000000000000000000..da5dd97460dcf500cc7379dcab3528365305242b GIT binary patch literal 2862 zcmV+}3(@oni2(=#5`+N?ZZB1HWpZ;bYet7FJobBX>et7En;PEV`yz*a%*!hb8=%ZW?^D- zX=5*8aA_}QX?SI1En#zWWppoYZ)9Z>3ub9}Wn?X3b8}^M8U-;fFfK3}pA{dA2mk;8 z0000J?OS_u+qe<$zvoYZ@nj+?XVJ3rXeX|&_3Ki%X`I@ZyUut#4opH85{lFTplwyB z@7`TJNRXoBmz<|Dlb9l~i+%idu>eUa!d0F`>@xTu|8!5S)9Ji;_Ke_PXw|=+&ieBg zq*FA~SzCLyj^po3Airf&Q30adl}}ZgZ$yz#lM_3bqY@n3P)!ZX4A_- z_PkSx|EKGJe*O^upQ7C`{#Tz6CS#tccElw;jTuSOHfDhoE@vi)G1<1^OeTJw=+RRc z*mkkrrbDhXg`tYmv)AmL#XvC>X~?7uSaKe0?;q@*Z2x0(@6Go9&hE+U{a3H{-n=?_ zz5DC#>kUiF<*!VfrXWg}G;=qNUM-DOyih^il+(8w!EGH+E3W>2Aa&6m=A_vqp(kT5~dL&DkY&{R53ymq63ep zK2>BuN2E{B;WhzV3e^3aM=T4ePS?n}uALp5^t zXu>QlFbc7a_ee8Ghu?wC0yzNxHygVkhv3OsnIGbFCW$cs(v)QMX(stqeql5uB{`>p>jYx&xFHjFu>&b0 zRPl&h2oCJ@)Rq2T9uKcb2PI#R*P(V#O6M-S)` z-$GotzrWeAC~Npdgv2zs-x@zvFuo9I`>g9Tty!jr-Z)sOUAip>YFv%dB8BCqY-n z5zSYa8U^nZO|#7R5K&)bMEUhWZD5_CmMwQYai{RApKETEGm0yvq_?RGW@|7&3p`1RwDRpoKyoN^Ug>4)R~K?B~Gu*w=VFBeg~-Yl=Y0;}dVEGEEmKGE6Zf=WA21mW!GFvq1$B6pbKj
zjJt;K)Yc3#lNjZr{Rst=S*%=&^T+uC4FRy&eVSu9@9gY-adu{<#O2OW>NI3?@1rsP zoxHEDCGA;S1*yQMILm0ws>*KdYad=;t;_FahhE`(D**Tm?v8_bTk6FlqYMeV4}|@m z*&G5I=$U>Hz!I~*Q<=MJF&n~rOAh{Sae*n~>V2Xvk~ZZQ)=jW#T_@Bl#N}1n=U1H0 z+-g(!-mN-iZB{HAv4>8hxTv(Rz_NKxJ_n(3?CRcWjAl|#@6H5uF^9f>_ik@TF0O(! zd~rDCMqv<2&#_4G-N(+YhIeZo4!L3j{c^%_6=dj2S+|wbPL{Jabh@*CC#QqAYer1% z;0keI$Tg!u+XxkaB2M<3cv-9J?V<0R(GLLYXuC}_TcJl|BTLMZGu59T1$yMHDA-|O zO}2nk{mIj9q$RI`yy;l%s_gN+`HJij-*GiIoUU7#ee=o9Gwgljw|e$pPm0S?sTDzu z(DHS-(2wg(yep9<8n9f>%EdIa;q48 zpT}UGyG;_!O?>od?5-!rT0Jw$Y#cGqdLqt9w%U&qQx>Gc8 zSaFl7K9@MrW*nPoWYY;Zhea7n-I@cm0~V$tlG|y9r?XDWpvNHkS#%;y_eqHTD^t{4 zRtt~PDOMirIc-8%44y2D@=2}#9oqa33&V7JF=6gh#W=1UhPtlS{2Iy)yQ)@lei_0A zp6}WxA5q)sF)Ee&y=eX2L+4Aj|+bG zfA9F92EzJ;UZx4CG<~*?W$ZKwHcL{jwr0+3Un^-%&j1yKu4J1`h&u7GIWWTzJ#3mw zS)!}NsF7moPT^#;8MWMJJ`cdgd)fj~ny}WiCWXT*?loqpNu}7Xg{QJ8#Z`qYj8lAL zc%vG~qVVE%%^qYHxF=T2Yr}|6(Ic+2aCMl#lS+Jw?@`Kq1Dl zo?&t(BbG!38~AZ6Cf`#HiZ=0zQ0IshYsvepM2oqvEJR~I;2IF}CH6WgCo7M}M$XCc zS6z?l?cvr)TclDF^?l#H(?>?2|LuE4nimH z&x+d;?trKuD@=u?KXuo%JaA_?FgIQ-Fa0n<5+h(vqL&e^pZ(m>iQGnJrrL6(X12Dh zN)elHBaut-Nu;&M5SAyFguD>4=DS9uTo2YXkxQa(ob2;K(GQY99{t?7cxVspp?&-9 M|5Yd!jsQde0Fr)-HUIzs literal 0 HcmV?d00001 diff --git a/networks/fabric/ansible-playbooks/v1/2org1peer/playbook.yml b/networks/fabric/ansible-playbooks/v1/2org1peer/playbook.yml new file mode 100644 index 000000000..ce9f7b005 --- /dev/null +++ b/networks/fabric/ansible-playbooks/v1/2org1peer/playbook.yml @@ -0,0 +1,189 @@ +# +# SPDX-License-Identifier: Apache-2.0 +# +--- +- name: Deploy blockchain infrastructure and smart contracts + hosts: localhost + vars: + infrastructure: + type: docker + docker: + network: fabric_network + saas: "{}" + organizations: + - &Org1 + msp: + id: "Org1MSP" + admin: + identity: "org1Admin" + secret: "org1Adminpw" + ibp: + display_name: "Org1 MSP" + ca: &Org1CA + id: "Org1CA" + admin_identity: "admin" + admin_secret: "adminpw" + tls: + enabled: true + docker: + name: ca.org1.example.com + hostname: ca.org1.example.com + port: 18050 + ibp: + display_name: "Org1 CA" + peers: + - &Org1Peer1 + id: "Org1Peer1" + identity: "org1peer1" + secret: "org1peer1pw" + database_type: couchdb + tls: + enabled: true + identity: "org1peer1tls" + secret: "org1peer1tlspw" + docker: + name: peer0.org1.example.com + hostname: peer0.org1.example.com + port: 18051 + chaincode_port: 18052 + operations_port: 18053 + couchdb: + name: couchdb0.org1.example.com + hostname: couchdb0.org1.example.com + port: 18054 + ibp: + display_name: "Org1 Peer1" + nodes: "{{ playbook_dir }}/nodes/Org1" + wallet: "{{ playbook_dir }}/wallets/Org1" + gateways: "{{ playbook_dir }}/gateways/Org1" + - &Org2 + msp: + id: "Org2MSP" + admin: + identity: "org2Admin" + secret: "org2Adminpw" + ibp: + display_name: "Org2 MSP" + ca: &Org2CA + id: "Org2CA" + admin_identity: "admin" + admin_secret: "adminpw" + tls: + enabled: true + docker: + name: ca.org2.example.com + hostname: ca.org2.example.com + port: 19050 + ibp: + display_name: "Org2 CA" + peers: + - &Org2Peer1 + id: "Org2Peer1" + identity: "org2peer1" + secret: "org2peer1pw" + database_type: leveldb + tls: + enabled: true + identity: "org2peer1tls" + secret: "org2peer1tlspw" + docker: + name: peer0.org2.example.com + hostname: peer0.org2.example.com + port: 19051 + chaincode_port: 19052 + operations_port: 19053 + couchdb: + name: couchdb0.org2.example.com + hostname: couchdb0.org2.example.com + port: 19054 + ibp: + display_name: "Org2 Peer1" + nodes: "{{ playbook_dir }}/nodes/Org2" + wallet: "{{ playbook_dir }}/wallets/Org2" + gateways: "{{ 
playbook_dir }}/gateways/Org2" + - &OrdererOrg + msp: + id: "OrdererMSP" + admin: + identity: "ordererAdmin" + secret: "ordererAdminpw" + ibp: + display_name: "Orderer MSP" + ca: &OrdererCA + id: "OrdererCA" + admin_identity: "admin" + admin_secret: "adminpw" + tls: + enabled: true + docker: + name: ca.orderer.example.com + hostname: ca.orderer.example.com + port: 17050 + ibp: + display_name: "Orderer CA" + orderer: &Orderer + id: "Orderer1" + identity: "orderer1" + secret: "orderer1pw" + tls: + enabled: true + identity: "orderer1tls" + secret: "orderer1tlspw" + consortium: + members: + - *Org1 + - *Org2 + docker: + name: orderer.example.com + hostname: orderer.example.com + port: 17051 + operations_port: 17052 + ibp: + display_name: "Orderer1" + cluster_name: "OrdererCluster" + nodes: "{{ playbook_dir }}/nodes/Orderer" + wallet: "{{ playbook_dir }}/wallets/Orderer" + gateways: "{{ playbook_dir }}/gateways/Orderer" + channels: + - &Channel1 + name: channel1 + orderer: *Orderer + members: + - <<: *Org1 + committing_peers: + - *Org1Peer1 + anchor_peers: + - *Org1Peer1 + - <<: *Org2 + committing_peers: + - *Org2Peer1 + anchor_peers: + - *Org2Peer1 + contracts: + - name: fixed-asset + version: 1.0.0 + package: "{{ playbook_dir }}/fixed-asset@1.0.0.cds" + channels: + - <<: *Channel1 + collections_config: "{{ playbook_dir }}/collections-config.json" + endorsement_policy: "AND('Org1MSP.member','Org2MSP.member')" + endorsing_members: + - <<: *Org1 + endorsing_peers: + - <<: *Org1Peer1 + - <<: *Org2 + endorsing_peers: + - <<: *Org2Peer1 + gateways: + - name: Org1 gateway + organization: + <<: *Org1 + gateway_peers: + - <<: *Org1Peer1 + - name: Org2 gateway + organization: + <<: *Org2 + gateway_peers: + - <<: *Org2Peer1 + roles: + - ibm.blockchain_platform_manager diff --git a/networks/fabric/ansible-playbooks/v1/2org1peer/requirements.yml b/networks/fabric/ansible-playbooks/v1/2org1peer/requirements.yml new file mode 100644 index 000000000..4d2a74531 --- /dev/null +++ b/networks/fabric/ansible-playbooks/v1/2org1peer/requirements.yml @@ -0,0 +1,5 @@ +# +# SPDX-License-Identifier: Apache-2.0 +# +--- +- ibm.blockchain_platform_manager \ No newline at end of file diff --git a/networks/fabric/ansible-playbooks/v2/2org1peer/.gitignore b/networks/fabric/ansible-playbooks/v2/2org1peer/.gitignore new file mode 100644 index 000000000..d0b8cc399 --- /dev/null +++ b/networks/fabric/ansible-playbooks/v2/2org1peer/.gitignore @@ -0,0 +1,9 @@ +# +# SPDX-License-Identifier: Apache-2.0 +# + +gateways +nodes +wallets +!**/.gitkeep +service-creds.json \ No newline at end of file diff --git a/networks/fabric/ansible-playbooks/v2/2org1peer/collections-config.json b/networks/fabric/ansible-playbooks/v2/2org1peer/collections-config.json new file mode 100644 index 000000000..37aa0bcaf --- /dev/null +++ b/networks/fabric/ansible-playbooks/v2/2org1peer/collections-config.json @@ -0,0 +1,10 @@ +[ + { + "name": "CollectionOne", + "policy": "OR('Org1MSP.member')", + "requiredPeerCount": 0, + "maxPeerCount": 1, + "blockToLive": 0, + "memberOnlyRead": false + } +] \ No newline at end of file diff --git a/networks/fabric/ansible-playbooks/v2/2org1peer/core.yaml b/networks/fabric/ansible-playbooks/v2/2org1peer/core.yaml new file mode 100644 index 000000000..232ecb66f --- /dev/null +++ b/networks/fabric/ansible-playbooks/v2/2org1peer/core.yaml @@ -0,0 +1,703 @@ +# Copyright IBM Corp. All Rights Reserved. 
+# +# SPDX-License-Identifier: Apache-2.0 +# + +############################################################################### +# +# Peer section +# +############################################################################### +peer: + + # The Peer id is used for identifying this Peer instance. + id: jdoe + + # The networkId allows for logical seperation of networks + networkId: dev + + # The Address at local network interface this Peer will listen on. + # By default, it will listen on all network interfaces + listenAddress: 0.0.0.0:7051 + + # The endpoint this peer uses to listen for inbound chaincode connections. + # If this is commented-out, the listen address is selected to be + # the peer's address (see below) with port 7052 + # chaincodeListenAddress: 0.0.0.0:7052 + + # The endpoint the chaincode for this peer uses to connect to the peer. + # If this is not specified, the chaincodeListenAddress address is selected. + # And if chaincodeListenAddress is not specified, address is selected from + # peer listenAddress. + # chaincodeAddress: 0.0.0.0:7052 + + # When used as peer config, this represents the endpoint to other peers + # in the same organization. For peers in other organization, see + # gossip.externalEndpoint for more info. + # When used as CLI config, this means the peer's endpoint to interact with + address: 0.0.0.0:7051 + + # Whether the Peer should programmatically determine its address + # This case is useful for docker containers. + addressAutoDetect: false + + # Setting for runtime.GOMAXPROCS(n). If n < 1, it does not change the + # current setting + gomaxprocs: -1 + + # Keepalive settings for peer server and clients + keepalive: + # MinInterval is the minimum permitted time between client pings. + # If clients send pings more frequently, the peer server will + # disconnect them + minInterval: 60s + # Client keepalive settings for communicating with other peer nodes + client: + # Interval is the time between pings to peer nodes. This must + # greater than or equal to the minInterval specified by peer + # nodes + interval: 60s + # Timeout is the duration the client waits for a response from + # peer nodes before closing the connection + timeout: 20s + # DeliveryClient keepalive settings for communication with ordering + # nodes. + deliveryClient: + # Interval is the time between pings to ordering nodes. This must + # greater than or equal to the minInterval specified by ordering + # nodes. + interval: 60s + # Timeout is the duration the client waits for a response from + # ordering nodes before closing the connection + timeout: 20s + + + # Gossip related configuration + gossip: + # Bootstrap set to initialize gossip with. + # This is a list of other peers that this peer reaches out to at startup. + # Important: The endpoints here have to be endpoints of peers in the same + # organization, because the peer would refuse connecting to these endpoints + # unless they are in the same organization as the peer. + bootstrap: 127.0.0.1:7051 + + # NOTE: orgLeader and useLeaderElection parameters are mutual exclusive. + # Setting both to true would result in the termination of the peer + # since this is undefined state. If the peers are configured with + # useLeaderElection=false, make sure there is at least 1 peer in the + # organization that its orgLeader is set to true. 
+ + # Defines whenever peer will initialize dynamic algorithm for + # "leader" selection, where leader is the peer to establish + # connection with ordering service and use delivery protocol + # to pull ledger blocks from ordering service. It is recommended to + # use leader election for large networks of peers. + useLeaderElection: true + # Statically defines peer to be an organization "leader", + # where this means that current peer will maintain connection + # with ordering service and disseminate block across peers in + # its own organization + orgLeader: false + + # Interval for membershipTracker polling + membershipTrackerInterval: 5s + + # Overrides the endpoint that the peer publishes to peers + # in its organization. For peers in foreign organizations + # see 'externalEndpoint' + endpoint: + # Maximum count of blocks stored in memory + maxBlockCountToStore: 100 + # Max time between consecutive message pushes(unit: millisecond) + maxPropagationBurstLatency: 10ms + # Max number of messages stored until a push is triggered to remote peers + maxPropagationBurstSize: 10 + # Number of times a message is pushed to remote peers + propagateIterations: 1 + # Number of peers selected to push messages to + propagatePeerNum: 3 + # Determines frequency of pull phases(unit: second) + # Must be greater than digestWaitTime + responseWaitTime + pullInterval: 4s + # Number of peers to pull from + pullPeerNum: 3 + # Determines frequency of pulling state info messages from peers(unit: second) + requestStateInfoInterval: 4s + # Determines frequency of pushing state info messages to peers(unit: second) + publishStateInfoInterval: 4s + # Maximum time a stateInfo message is kept until expired + stateInfoRetentionInterval: + # Time from startup certificates are included in Alive messages(unit: second) + publishCertPeriod: 10s + # Should we skip verifying block messages or not (currently not in use) + skipBlockVerification: false + # Dial timeout(unit: second) + dialTimeout: 3s + # Connection timeout(unit: second) + connTimeout: 2s + # Buffer size of received messages + recvBuffSize: 20 + # Buffer size of sending messages + sendBuffSize: 200 + # Time to wait before pull engine processes incoming digests (unit: second) + # Should be slightly smaller than requestWaitTime + digestWaitTime: 1s + # Time to wait before pull engine removes incoming nonce (unit: milliseconds) + # Should be slightly bigger than digestWaitTime + requestWaitTime: 1500ms + # Time to wait before pull engine ends pull (unit: second) + responseWaitTime: 2s + # Alive check interval(unit: second) + aliveTimeInterval: 5s + # Alive expiration timeout(unit: second) + aliveExpirationTimeout: 25s + # Reconnect interval(unit: second) + reconnectInterval: 25s + # This is an endpoint that is published to peers outside of the organization. + # If this isn't set, the peer will not be known to other organizations. 
+ externalEndpoint: + # Leader election service configuration + election: + # Longest time peer waits for stable membership during leader election startup (unit: second) + startupGracePeriod: 15s + # Interval gossip membership samples to check its stability (unit: second) + membershipSampleInterval: 1s + # Time passes since last declaration message before peer decides to perform leader election (unit: second) + leaderAliveThreshold: 10s + # Time between peer sends propose message and declares itself as a leader (sends declaration message) (unit: second) + leaderElectionDuration: 5s + + pvtData: + # pullRetryThreshold determines the maximum duration of time private data corresponding for a given block + # would be attempted to be pulled from peers until the block would be committed without the private data + pullRetryThreshold: 60s + # As private data enters the transient store, it is associated with the peer's ledger's height at that time. + # transientstoreMaxBlockRetention defines the maximum difference between the current ledger's height upon commit, + # and the private data residing inside the transient store that is guaranteed not to be purged. + # Private data is purged from the transient store when blocks with sequences that are multiples + # of transientstoreMaxBlockRetention are committed. + transientstoreMaxBlockRetention: 1000 + # pushAckTimeout is the maximum time to wait for an acknowledgement from each peer + # at private data push at endorsement time. + pushAckTimeout: 3s + # Block to live pulling margin, used as a buffer + # to prevent peer from trying to pull private data + # from peers that is soon to be purged in next N blocks. + # This helps a newly joined peer catch up to current + # blockchain height quicker. + btlPullMargin: 10 + # the process of reconciliation is done in an endless loop, while in each iteration reconciler tries to + # pull from the other peers the most recent missing blocks with a maximum batch size limitation. + # reconcileBatchSize determines the maximum batch size of missing private data that will be reconciled in a + # single iteration. + reconcileBatchSize: 10 + # reconcileSleepInterval determines the time reconciler sleeps from end of an iteration until the beginning + # of the next reconciliation iteration. + reconcileSleepInterval: 1m + # reconciliationEnabled is a flag that indicates whether private data reconciliation is enable or not. + reconciliationEnabled: true + # skipPullingInvalidTransactionsDuringCommit is a flag that indicates whether pulling of invalid + # transaction's private data from other peers need to be skipped during the commit time and pulled + # only through reconciler. + skipPullingInvalidTransactionsDuringCommit: false + + # Gossip state transfer related configuration + state: + # indicates whenever state transfer is enabled or not + # default value is true, i.e. state transfer is active + # and takes care to sync up missing blocks allowing + # lagging peer to catch up to speed with rest network + enabled: true + # checkInterval interval to check whether peer is lagging behind enough to + # request blocks via state transfer from another peer. + checkInterval: 10s + # responseTimeout amount of time to wait for state transfer response from + # other peers + responseTimeout: 3s + # batchSize the number of blocks to request via state transfer from another peer + batchSize: 10 + # blockBufferSize reflect the maximum distance between lowest and + # highest block sequence number state buffer to avoid holes. 
+ # In order to ensure absence of the holes actual buffer size + # is twice of this distance + blockBufferSize: 100 + # maxRetries maximum number of re-tries to ask + # for single state transfer request + maxRetries: 3 + + # TLS Settings + # Note that peer-chaincode connections through chaincodeListenAddress is + # not mutual TLS auth. See comments on chaincodeListenAddress for more info + tls: + # Require server-side TLS + enabled: false + # Require client certificates / mutual TLS. + # Note that clients that are not configured to use a certificate will + # fail to connect to the peer. + clientAuthRequired: false + # X.509 certificate used for TLS server + cert: + file: tls/server.crt + # Private key used for TLS server (and client if clientAuthEnabled + # is set to true + key: + file: tls/server.key + # Trusted root certificate chain for tls.cert + rootcert: + file: tls/ca.crt + # Set of root certificate authorities used to verify client certificates + clientRootCAs: + files: + - tls/ca.crt + # Private key used for TLS when making client connections. If + # not set, peer.tls.key.file will be used instead + clientKey: + file: + # X.509 certificate used for TLS when making client connections. + # If not set, peer.tls.cert.file will be used instead + clientCert: + file: + + # Authentication contains configuration parameters related to authenticating + # client messages + authentication: + # the acceptable difference between the current server time and the + # client's time as specified in a client request message + timewindow: 15m + + # Path on the file system where peer will store data (eg ledger). This + # location must be access control protected to prevent unintended + # modification that might corrupt the peer operations. + fileSystemPath: /var/hyperledger/production + + # BCCSP (Blockchain crypto provider): Select which crypto implementation or + # library to use + BCCSP: + Default: SW + # Settings for the SW crypto provider (i.e. when DEFAULT: SW) + SW: + # TODO: The default Hash and Security level needs refactoring to be + # fully configurable. Changing these defaults requires coordination + # SHA2 is hardcoded in several places, not only BCCSP + Hash: SHA2 + Security: 256 + # Location of Key Store + FileKeyStore: + # If "", defaults to 'mspConfigPath'/keystore + KeyStore: + # Settings for the PKCS#11 crypto provider (i.e. when DEFAULT: PKCS11) + PKCS11: + # Location of the PKCS11 module library + Library: + # Token Label + Label: + # User PIN + Pin: + Hash: + Security: + FileKeyStore: + KeyStore: + + # Path on the file system where peer will find MSP local configurations + mspConfigPath: msp + + # Identifier of the local MSP + # ----!!!!IMPORTANT!!!-!!!IMPORTANT!!!-!!!IMPORTANT!!!!---- + # Deployers need to change the value of the localMspId string. + # In particular, the name of the local MSP ID of a peer needs + # to match the name of one of the MSPs in each of the channel + # that this peer is a member of. Otherwise this peer's messages + # will not be identified as valid by other nodes. + localMspId: SampleOrg + + # CLI common client config options + client: + # connection timeout + connTimeout: 3s + + # Delivery service related config + deliveryclient: + # The total time to spend retrying connections to ordering nodes + # before giving up and returning an error. + reconnectTotalTimeThreshold: 3600s + + # The connection timeout when connecting to ordering service nodes. 
+ connTimeout: 3s + + # The maximum delay between consecutive connection retry attempts to + # ordering nodes. + reConnectBackoffThreshold: 3600s + + # A list of orderer endpoint addresses which should be overridden + # when found in channel configurations. + addressOverrides: + # - from: + # to: + # caCertsFile: + # - from: + # to: + # caCertsFile: + + # Type for the local MSP - by default it's of type bccsp + localMspType: bccsp + + # Used with Go profiling tools only in none production environment. In + # production, it should be disabled (eg enabled: false) + profile: + enabled: false + listenAddress: 0.0.0.0:6060 + + # The admin service is used for administrative operations such as + # control over logger levels, etc. + # Only peer administrators can use the service. + adminService: + # The interface and port on which the admin server will listen on. + # If this is commented out, or the port number is equal to the port + # of the peer listen address - the admin service is attached to the + # peer's service (defaults to 7051). + #listenAddress: 0.0.0.0:7055 + + # Handlers defines custom handlers that can filter and mutate + # objects passing within the peer, such as: + # Auth filter - reject or forward proposals from clients + # Decorators - append or mutate the chaincode input passed to the chaincode + # Endorsers - Custom signing over proposal response payload and its mutation + # Valid handler definition contains: + # - A name which is a factory method name defined in + # core/handlers/library/library.go for statically compiled handlers + # - library path to shared object binary for pluggable filters + # Auth filters and decorators are chained and executed in the order that + # they are defined. For example: + # authFilters: + # - + # name: FilterOne + # library: /opt/lib/filter.so + # - + # name: FilterTwo + # decorators: + # - + # name: DecoratorOne + # - + # name: DecoratorTwo + # library: /opt/lib/decorator.so + # Endorsers are configured as a map that its keys are the endorsement system chaincodes that are being overridden. + # Below is an example that overrides the default ESCC and uses an endorsement plugin that has the same functionality + # as the default ESCC. + # If the 'library' property is missing, the name is used as the constructor method in the builtin library similar + # to auth filters and decorators. + # endorsers: + # escc: + # name: DefaultESCC + # library: /etc/hyperledger/fabric/plugin/escc.so + handlers: + authFilters: + - + name: DefaultAuth + - + name: ExpirationCheck # This filter checks identity x509 certificate expiration + decorators: + - + name: DefaultDecorator + endorsers: + escc: + name: DefaultEndorsement + library: + validators: + vscc: + name: DefaultValidation + library: + + # library: /etc/hyperledger/fabric/plugin/escc.so + # Number of goroutines that will execute transaction validation in parallel. + # By default, the peer chooses the number of CPUs on the machine. Set this + # variable to override that choice. + # NOTE: overriding this value might negatively influence the performance of + # the peer so please change this value only if you know what you're doing + validatorPoolSize: + + # The discovery service is used by clients to query information about peers, + # such as - which peers have joined a certain channel, what is the latest + # channel config, and most importantly - given a chaincode and a channel, + # what possible sets of peers satisfy the endorsement policy. 
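The discovery service just described is what lets a Gateway client resolve channel topology and endorsement plans at runtime instead of enumerating every peer in its connection profile. The corresponding client-side switches are a small options object (a sketch; asLocalhost suits the docker-based playbooks in this change, where peer ports are published on the local host):

    // Discovery-related Gateway options; merged into gateway.connect(...) as
    // { wallet, identity, discovery: discoveryOptions }.
    const discoveryOptions = {
        enabled: true,        // query the peer's discovery service for topology
        asLocalhost: true     // remap discovered endpoints to localhost
    };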
+ discovery: + enabled: true + # Whether the authentication cache is enabled or not. + authCacheEnabled: true + # The maximum size of the cache, after which a purge takes place + authCacheMaxSize: 1000 + # The proportion (0 to 1) of entries that remain in the cache after the cache is purged due to overpopulation + authCachePurgeRetentionRatio: 0.75 + # Whether to allow non-admins to perform non channel scoped queries. + # When this is false, it means that only peer admins can perform non channel scoped queries. + orgMembersAllowedAccess: false +############################################################################### +# +# VM section +# +############################################################################### +vm: + + # Endpoint of the vm management system. For docker can be one of the following in general + # unix:///var/run/docker.sock + # http://localhost:2375 + # https://localhost:2376 + endpoint: unix:///var/run/docker.sock + + # settings for docker vms + docker: + tls: + enabled: false + ca: + file: docker/ca.crt + cert: + file: docker/tls.crt + key: + file: docker/tls.key + + # Enables/disables the standard out/err from chaincode containers for + # debugging purposes + attachStdout: false + + # Parameters on creating docker container. + # Container may be efficiently created using ipam & dns-server for cluster + # NetworkMode - sets the networking mode for the container. Supported + # standard values are: `host`(default),`bridge`,`ipvlan`,`none`. + # Dns - a list of DNS servers for the container to use. + # Note: `Privileged` `Binds` `Links` and `PortBindings` properties of + # Docker Host Config are not supported and will not be used if set. + # LogConfig - sets the logging driver (Type) and related options + # (Config) for Docker. For more info, + # https://docs.docker.com/engine/admin/logging/overview/ + # Note: Set LogConfig using Environment Variables is not supported. + hostConfig: + NetworkMode: host + Dns: + # - 192.168.0.1 + LogConfig: + Type: json-file + Config: + max-size: "50m" + max-file: "5" + Memory: 2147483648 + +############################################################################### +# +# Chaincode section +# +############################################################################### +chaincode: + + # The id is used by the Chaincode stub to register the executing Chaincode + # ID with the Peer and is generally supplied through ENV variables + # the `path` form of ID is provided when installing the chaincode. + # The `name` is used for all other requests and can be any string. + id: + path: + name: + + # Generic builder environment, suitable for most chaincode types + builder: $(DOCKER_NS)/fabric-ccenv:latest + + # Enables/disables force pulling of the base docker images (listed below) + # during user chaincode instantiation. + # Useful when using moving image tags (such as :latest) + pull: false + + golang: + # golang will never need more than baseos + runtime: $(BASE_DOCKER_NS)/fabric-baseos:$(ARCH)-$(BASE_VERSION) + + # whether or not golang chaincode should be linked dynamically + dynamicLink: false + + car: + # car may need more facilities (JVM, etc) in the future as the catalog + # of platforms are expanded. For now, we can just use baseos + runtime: $(BASE_DOCKER_NS)/fabric-baseos:$(ARCH)-$(BASE_VERSION) + + java: + # This is an image based on java:openjdk-8 with addition compiler + # tools added for java shim layer packaging. + # This image is packed with shim layer libraries that are necessary + # for Java chaincode runtime. 
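For a Node contract such as fixed-asset, the package that the peer launches in the node runtime conventionally exposes a fabric-contract-api entry point; the shim then registers back with the peer under the chaincode id above. The sketch below shows that convention only — the export style of lib/fixed-asset.js is assumed, not copied from this patch:

    'use strict';
    // index.js — conventional fabric-contract-api entry point for the
    // packaged Node chaincode (layout assumed).
    const Asset = require('./lib/fixed-asset');

    module.exports.contracts = [Asset];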
+ runtime: $(DOCKER_NS)/fabric-javaenv:$(ARCH)-$(PROJECT_VERSION) + + node: + # need node.js engine at runtime, currently available in baseimage + # but not in baseos + runtime: $(BASE_DOCKER_NS)/fabric-baseimage:$(ARCH)-$(BASE_VERSION) + + # Timeout duration for starting up a container and waiting for Register + # to come through. 1sec should be plenty for chaincode unit tests + startuptimeout: 300s + + # Timeout duration for Invoke and Init calls to prevent runaway. + # This timeout is used by all chaincodes in all the channels, including + # system chaincodes. + # Note that during Invoke, if the image is not available (e.g. being + # cleaned up when in development environment), the peer will automatically + # build the image, which might take more time. In production environment, + # the chaincode image is unlikely to be deleted, so the timeout could be + # reduced accordingly. + executetimeout: 30s + + # There are 2 modes: "dev" and "net". + # In dev mode, user runs the chaincode after starting peer from + # command line on local machine. + # In net mode, peer will run chaincode in a docker container. + mode: net + + # keepalive in seconds. In situations where the communiction goes through a + # proxy that does not support keep-alive, this parameter will maintain connection + # between peer and chaincode. + # A value <= 0 turns keepalive off + keepalive: 0 + + # system chaincodes whitelist. To add system chaincode "myscc" to the + # whitelist, add "myscc: enable" to the list below, and register in + # chaincode/importsysccs.go + system: + cscc: enable + lscc: enable + escc: enable + vscc: enable + qscc: enable + + # System chaincode plugins: + # System chaincodes can be loaded as shared objects compiled as Go plugins. + # See examples/plugins/scc for an example. + # Plugins must be white listed in the chaincode.system section above. + systemPlugins: + # example configuration: + # - enabled: true + # name: myscc + # path: /opt/lib/myscc.so + # invokableExternal: true + # invokableCC2CC: true + + # Logging section for the chaincode container + logging: + # Default level for all loggers within the chaincode container + level: info + # Override default level for the 'shim' logger + shim: warning + # Format for the chaincode container logs + format: '%{color}%{time:2006-01-02 15:04:05.000 MST} [%{module}] %{shortfunc} -> %{level:.4s} %{id:03x}%{color:reset} %{message}' + +############################################################################### +# +# Ledger section - ledger configuration encompases both the blockchain +# and the state +# +############################################################################### +ledger: + + blockchain: + + state: + # stateDatabase - options are "goleveldb", "CouchDB" + # goleveldb - default state database stored in goleveldb. + # CouchDB - store state database in CouchDB + stateDatabase: goleveldb + # Limit on the number of records to return per query + totalQueryLimit: 100000 + couchDBConfig: + # It is recommended to run CouchDB on the same server as the peer, and + # not map the CouchDB container port to a server port in docker-compose. + # Otherwise proper security must be provided on the connection between + # CouchDB client (on the peer) and server. + couchDBAddress: 127.0.0.1:5984 + # This username must have read and write authority on CouchDB + username: + # The password is recommended to pass as an environment variable + # during start up (eg CORE_LEDGER_STATE_COUCHDBCONFIG_PASSWORD). 
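Reads such as the companion getPrivateAsset benchmark are evaluated on a single peer and never submitted for ordering, so their latency is dominated by the state database selected here (goleveldb in this file, CouchDB in the couchDB benchmark variant); private data collections use the same stateDatabase setting. A sketch, assuming getPrivateAsset takes the uuid and returns the JSON stored by createPrivateAsset:

    async function readPrivateAsset(contract, uuid) {
        // Evaluate = query a single peer, no ordering or commit.
        const resultBytes = await contract.evaluateTransaction('getPrivateAsset', uuid);
        return JSON.parse(resultBytes.toString('utf8'));
    }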
+ # If it is stored here, the file must be access control protected + # to prevent unintended users from discovering the password. + password: + # Number of retries for CouchDB errors + maxRetries: 3 + # Number of retries for CouchDB errors during peer startup + maxRetriesOnStartup: 12 + # CouchDB request timeout (unit: duration, e.g. 20s) + requestTimeout: 35s + # Limit on the number of records per each CouchDB query + # Note that chaincode queries are only bound by totalQueryLimit. + # Internally the chaincode may execute multiple CouchDB queries, + # each of size internalQueryLimit. + internalQueryLimit: 1000 + # Limit on the number of records per CouchDB bulk update batch + maxBatchUpdateSize: 1000 + # Warm indexes after every N blocks. + # This option warms any indexes that have been + # deployed to CouchDB after every N blocks. + # A value of 1 will warm indexes after every block commit, + # to ensure fast selector queries. + # Increasing the value may improve write efficiency of peer and CouchDB, + # but may degrade query response time. + warmIndexesAfterNBlocks: 1 + # Create the _global_changes system database + # This is optional. Creating the global changes database will require + # additional system resources to track changes and maintain the database + createGlobalChangesDB: false + + history: + # enableHistoryDatabase - options are true or false + # Indicates if the history of key updates should be stored. + # All history 'index' will be stored in goleveldb, regardless if using + # CouchDB or alternate database for the state. + enableHistoryDatabase: true + +############################################################################### +# +# Operations section +# +############################################################################### +operations: + # host and port for the operations server + listenAddress: 127.0.0.1:9443 + + # TLS configuration for the operations endpoint + tls: + # TLS enabled + enabled: false + + # path to PEM encoded server certificate for the operations server + cert: + file: + + # path to PEM encoded server key for the operations server + key: + file: + + # most operations service endpoints require client authentication when TLS + # is enabled. clientAuthRequired requires client certificate authentication + # at the TLS layer to access all resources. 
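The operations listener configured here also serves the documented /healthz health endpoint, which is a convenient liveness probe for a benchmark run; with TLS disabled a plain HTTP request is enough (a sketch against the listenAddress above):

    const http = require('http');

    http.get('http://127.0.0.1:9443/healthz', (res) => {
        let body = '';
        res.on('data', (chunk) => { body += chunk; });
        // Expect HTTP 200 and a JSON body reporting "status": "OK".
        res.on('end', () => console.log(res.statusCode, body));
    });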
+ clientAuthRequired: false + + # paths to PEM encoded ca certificates to trust for client authentication + clientRootCAs: + files: [] + +############################################################################### +# +# Metrics section +# +############################################################################### +metrics: + # metrics provider is one of statsd, prometheus, or disabled + provider: disabled + + # statsd configuration + statsd: + # network type: tcp or udp + network: udp + + # statsd server address + address: 127.0.0.1:8125 + + # the interval at which locally cached counters and gauges are pushed + # to statsd; timings are pushed immediately + writeInterval: 10s + + # prefix is prepended to all emitted statsd metrics + prefix: diff --git a/networks/fabric/ansible-playbooks/v2/2org1peer/fixed-asset@1.0.0.tar.gz b/networks/fabric/ansible-playbooks/v2/2org1peer/fixed-asset@1.0.0.tar.gz new file mode 100644 index 0000000000000000000000000000000000000000..9c00c0a0fd55940d7b99ba0e4c0eda6372e00900 GIT binary patch literal 2968 zcmV;J3up8niwFP!000006YbVlP!n7h0N?qk%HOTX3}3(cn>r4!z}mlm*;G zxC6ug+vWc9Z{~LNta3*kAm*A@(=y52vPdg|NjXh$t-_O_%m?gQmVn)7@rz^AvyQA^3v-aiV z#|9w5bdzl>CFQKFZfH8j&t&k_C*H1z@n)A%gIb~8sh8(E5Y~CFo+ZpNC%iLyWn8kK z+er$l{zeNfnLmlkEG)u;-wll2BJZ?Rece=I-El_FbVIs+?nkz^?~!ej=){f4h(m6A zLGc^#;nf0ekQ=@%s7GAmfuQlEvPbkOIZsfc!d&CB87Kn(al-b@Y*5`|sctnSt_U+l zdPTwYURF5eJwRZ02t4OU6`|n$=x!+L=-%Pj*5C&1YR%pvSuOP0!N%4Ghw0F2jTx>~ z)Fc~<(o7W*&|=JNKg;=OwiK)6NLbj!_ATZWd^`I-RcnB(eW}fndPB*mA`N1;k`sp^ zvW_&8^Xc=Jv!9RWj^yNU#k-W$T88qL@`(K=UgTn@pu<#O+pZVB7oM|0J4j7QapmbOd>kLxN0?s;{~$b#AG9(*%`7$&@p z9%Z};psSp16HQneggp{@hXN%j046gKU%Wxz?IYmO3oC}>&SUh8UL-z%kpjNg6CkA>BCBrNY zF~)~l$KTlDqNBEsTU4ZPB6Yrj*2OOp?`<4&r8b}E3$A050^02)I) z$HJVKwmy7PkCgso(HY$Uxdf>486d*!1m(2$&wn3(PE&~!)w2D# zA6y`S7eUnLC#1!blwI0q{YH}bNLKv9_oSH!XBMu_Dt?c!$tqK6|43}BN*TDejIRL7U6lh=Au03XXXP%N<}kK zvC6PLohaJ-%n{P(gfZIrw2rV1vYeBBp#H9Zl{Q@Iq9~z#wU1}?YK{;y^^`ejwBBCgTko#=*4D`G7H@o z&a?k@CDncv`5qU@@AxTUWxvOKm-Sp)Y!HU|dhET@n#Q}^5Asnav;iVx4Mme5$4 zAHph~FuVQV88{efXTx*dRH+n7I5NoNanI_Q^*R)RqLwHe=96ox#5ZBrn4ff(st$XN zmJbDN`ik6et{SGgC)`-RBMsPR-%5!IbzJWiD`97C=Y$!BK0b=z?&Kx4+dgXZ zQ8?Ru$CUFh02wXyZautM4y(L+dd9rg=eoqSbkC~&!rE2b*lQo#{lzLZcmFFgLUjoi zCCz0|YdY&gEi_LQAt~#N-uFFhEU9D1ZYw0TM|=yoX((5H4|*4=x@rLDHW{xo+J4hN zRuv!Ydycckm5CPEjpRpsG=Vi`PFZA;l+mrE2nk|Lldpl<<`NsyWk9J!dx3>N3TUyH z7XqtT-z=i$O>hA=yxPnmjzxxUVx}Wl$jbr|=CJpblCy9Z^TsGU+TfHwLA)lK{K%&_$ne(eV@KHqMh0zErDJ!JA$eQvb& z6PkOPES)YcRO_ie5o&Y`029R}!2@WHCwzd&rh z!00hj19D~Z%Ssd`H499CqPtg|s>aB5cg19GKoU9p_UmOQbMkDuTnJl?poSOkY7OjX zo>X~3^t;f^c#oFEy%^aw6hM)70`|ygm(qaUc!re@tT=PGpx-6G&761C{O%VoO0`7) zH4Vs+pRmGs7;wy}2~>=O`4rrKC!C<_{K`*%kN2g_^z{up5kWPkS*6^cdTL*h>V1{x zw_l***m1g`hRh`8gXlf#O_!LaK2^Z_hYSJE;Tw|ut+vox4#oFf`EYn{gsd+IHIU%E{D8sVnCw7yWe{*zq#S zpN*^Nf13ZvdGa|rOaZ=S*Q;7B9N$_!^HXSuqpkc(V1!^dGoCsxaehpIbXxG0vyk6z z`x1q0G>|h%9Sbf;qWA^<>0eBz3>=c=wOk8>Ose(D23- zcM6nM+0>@G(PPZdS=!rt9Kau2jmR@gRNJY&2!773;g@R)E3W7%OLtwjSVoWp8(w>A za&`9InP;@lt(|!aihC}-rQ}g&{CKuHj#w|*Yab_b-}bIsNQuz1xN6e7;Bq;_;3MgI z3TilC_FG|q&rKBZnsv52-BrwVZiVqmM(Xy0hW^Xq%jf&*@bEy8Se731*?pum=&daW zMtKKgwU)^5J9tp3m)ue#qvxi%B*gt$d{0eELOQk2U~F*b>h;w1&E***M}iIS>badi zeYVP&K}=e@ex<&cIVoizUDv+vgSMd!$6(L_m2zsN zvwBqoy{Y}sZ*%#e!*Zg}R7*wz-I?W#b*G+vvC7{ZnL>?v%oYVs{wDA%Gfn13--^n(D7q;OEy^FHIM4>PT#jV-|R~qRK?2&Eg1Jw_iO~ zzvZb4R=FV~PXlZ7in&-6XY*7Q&|betdTroQIv+^zXl$%B;mNxxmTtOZOlaum&XUNA zC3Dw*Xfq%0761V7OzLd_ literal 0 HcmV?d00001 diff --git a/networks/fabric/ansible-playbooks/v2/2org1peer/playbook.yml b/networks/fabric/ansible-playbooks/v2/2org1peer/playbook.yml 
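If the metrics provider above is switched from disabled to prometheus, the same operations listener serves Prometheus-format metrics that Caliper or a monitoring stack can scrape with an ordinary HTTP GET (a sketch, reusing the operations listenAddress from this core.yaml):

    const http = require('http');

    http.get('http://127.0.0.1:9443/metrics', (res) => {
        let body = '';
        res.on('data', (chunk) => { body += chunk; });
        // Print the first few exposition lines as a smoke test.
        res.on('end', () => console.log(body.split('\n').slice(0, 10).join('\n')));
    });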
new file mode 100644 index 000000000..4d5a8557f --- /dev/null +++ b/networks/fabric/ansible-playbooks/v2/2org1peer/playbook.yml @@ -0,0 +1,190 @@ +# +# SPDX-License-Identifier: Apache-2.0 +# +--- +- name: Deploy blockchain infrastructure and smart contracts + hosts: localhost + vars: + infrastructure: + type: docker + docker: + network: fabric_network + saas: "{}" + organizations: + - &Org1 + msp: + id: "Org1MSP" + admin: + identity: "org1Admin" + secret: "org1Adminpw" + ibp: + display_name: "Org1 MSP" + ca: &Org1CA + id: "Org1CA" + admin_identity: "admin" + admin_secret: "adminpw" + tls: + enabled: true + docker: + name: ca.org1.example.com + hostname: ca.org1.example.com + port: 18050 + ibp: + display_name: "Org1 CA" + peers: + - &Org1Peer1 + id: "Org1Peer1" + identity: "org1peer1" + secret: "org1peer1pw" + database_type: couchdb + tls: + enabled: true + identity: "org1peer1tls" + secret: "org1peer1tlspw" + docker: + name: peer0.org1.example.com + hostname: peer0.org1.example.com + port: 18051 + chaincode_port: 18052 + operations_port: 18053 + couchdb: + name: couchdb0.org1.example.com + hostname: couchdb0.org1.example.com + port: 18054 + ibp: + display_name: "Org1 Peer1" + nodes: "{{ playbook_dir }}/nodes/Org1" + wallet: "{{ playbook_dir }}/wallets/Org1" + gateways: "{{ playbook_dir }}/gateways/Org1" + - &Org2 + msp: + id: "Org2MSP" + admin: + identity: "org2Admin" + secret: "org2Adminpw" + ibp: + display_name: "Org2 MSP" + ca: &Org2CA + id: "Org2CA" + admin_identity: "admin" + admin_secret: "adminpw" + tls: + enabled: true + docker: + name: ca.org2.example.com + hostname: ca.org2.example.com + port: 19050 + ibp: + display_name: "Org2 CA" + peers: + - &Org2Peer1 + id: "Org2Peer1" + identity: "org2peer1" + secret: "org2peer1pw" + database_type: leveldb + tls: + enabled: true + identity: "org2peer1tls" + secret: "org2peer1tlspw" + docker: + name: peer0.org2.example.com + hostname: peer0.org2.example.com + port: 19051 + chaincode_port: 19052 + operations_port: 19053 + couchdb: + name: couchdb0.org2.example.com + hostname: couchdb0.org2.example.com + port: 19054 + ibp: + display_name: "Org2 Peer1" + nodes: "{{ playbook_dir }}/nodes/Org2" + wallet: "{{ playbook_dir }}/wallets/Org2" + gateways: "{{ playbook_dir }}/gateways/Org2" + - &OrdererOrg + msp: + id: "OrdererMSP" + admin: + identity: "ordererAdmin" + secret: "ordererAdminpw" + ibp: + display_name: "Orderer MSP" + ca: &OrdererCA + id: "OrdererCA" + admin_identity: "admin" + admin_secret: "adminpw" + tls: + enabled: true + docker: + name: ca.orderer.example.com + hostname: ca.orderer.example.com + port: 17050 + ibp: + display_name: "Orderer CA" + orderer: &Orderer + id: "Orderer1" + identity: "orderer1" + secret: "orderer1pw" + tls: + enabled: true + identity: "orderer1tls" + secret: "orderer1tlspw" + consortium: + members: + - *Org1 + - *Org2 + docker: + name: orderer.example.com + hostname: orderer.example.com + port: 17051 + operations_port: 17052 + ibp: + display_name: "Orderer1" + cluster_name: "OrdererCluster" + nodes: "{{ playbook_dir }}/nodes/Orderer" + wallet: "{{ playbook_dir }}/wallets/Orderer" + gateways: "{{ playbook_dir }}/gateways/Orderer" + channels: + - &Channel1 + name: channel1 + orderer: *Orderer + members: + - <<: *Org1 + committing_peers: + - *Org1Peer1 + anchor_peers: + - *Org1Peer1 + - <<: *Org2 + committing_peers: + - *Org2Peer1 + anchor_peers: + - *Org2Peer1 + contracts: + - package: "{{ playbook_dir }}/fixed-asset@1.0.0.tar.gz" + channels: + - <<: *Channel1 + definitions: + - name: fixed-asset + version: 1.0.0 
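Once this playbook has run, the wallets and gateway connection profiles it writes under the configured directories are what a benchmark client consumes. Roughly, using the 1.4-style wallet API; the connection profile filename under gateways/Org1 is an assumption about what the ibm.blockchain_platform_manager role emits, while the channel, contract and identity names come from the playbook itself:

    const fs = require('fs');
    const { FileSystemWallet, Gateway } = require('fabric-network');

    async function getFixedAssetContract() {
        // Connection profile and wallet generated by the playbook (filename assumed).
        const connectionProfile = JSON.parse(
            fs.readFileSync('./gateways/Org1/Org1 gateway.json', 'utf8'));
        const wallet = new FileSystemWallet('./wallets/Org1');
        const gateway = new Gateway();
        await gateway.connect(connectionProfile, {
            wallet,
            identity: 'org1Admin',
            discovery: { enabled: true, asLocalhost: true }
        });
        const network = await gateway.getNetwork('channel1');
        return network.getContract('fixed-asset');
    }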
+ endorsement_policy: "AND('Org1MSP.member','Org2MSP.member')" + collections_config: "{{ playbook_dir }}/collections-config.json" + endorsing_members: + - <<: *Org1 + endorsing_peers: + - <<: *Org1Peer1 + - <<: *Org2 + endorsing_peers: + - <<: *Org2Peer1 + gateways: + - name: Org1 gateway + organization: + <<: *Org1 + gateway_peers: + - <<: *Org1Peer1 + - name: Org2 gateway + organization: + <<: *Org2 + gateway_peers: + - <<: *Org2Peer1 + roles: + - ibm.blockchain_platform_manager diff --git a/networks/fabric/ansible-playbooks/v2/2org1peer/requirements.yml b/networks/fabric/ansible-playbooks/v2/2org1peer/requirements.yml new file mode 100644 index 000000000..4d2a74531 --- /dev/null +++ b/networks/fabric/ansible-playbooks/v2/2org1peer/requirements.yml @@ -0,0 +1,5 @@ +# +# SPDX-License-Identifier: Apache-2.0 +# +--- +- ibm.blockchain_platform_manager \ No newline at end of file diff --git a/src/fabric/api/fixed-asset/node/lib/fixed-asset.js b/src/fabric/api/fixed-asset/node/lib/fixed-asset.js index e851a5883..47647b06a 100644 --- a/src/fabric/api/fixed-asset/node/lib/fixed-asset.js +++ b/src/fabric/api/fixed-asset/node/lib/fixed-asset.js @@ -11,6 +11,8 @@ const { Contract } = require('fabric-contract-api'); const logLevel = process.env.CORE_CHAINCODE_LOGGING_LEVEL; const isVerbose = (logLevel && (logLevel.toUpperCase() === 'INFO' || logLevel.toUpperCase() === 'DEBUG' )); +const collection = "CollectionOne"; + /** * Simple chaincode to create an asset that may have a user provided body */ @@ -60,6 +62,34 @@ class Asset extends Contract { } } + /** + * Create an Asset in the private data store based on the transient data that is provided of the form + * { + * uuid: unique identifier + * creator: the creator + * bytesize: target bytesize of asset + * content: variable content + * } + * + * Writes transient data against the passed uuid + * @param {Context} ctx the context + * @param {number} uuid the uuid to persist the body under + */ + async createPrivateAsset(ctx, uuid) { + if (isVerbose) { + console.log('Entering createPrivateAsset'); + } + const privateAsset = {}; + const transientData = ctx.stub.getTransient(); + privateAsset.content = transientData.get('content').toString('utf8'); + + await ctx.stub.putPrivateData(collection, uuid, Buffer.from(JSON.stringify(privateAsset))); + + if (isVerbose) { + console.log('Exiting createPrivateAsset'); + } + } + /** * Create an Asset in the registry based on the body that is provided of the form * { @@ -110,6 +140,40 @@ class Asset extends Contract { } } + /** + * Create a set of Assets in the registry based on the body that is provided of the form + * [{ + * uuid: unique identifier + * creator: the creator + * bytesize: target bytesize of asset + * content: variable content + * }, ...] + * @param {Context} ctx the context + * @param {String} batch the content to persist within an array + */ + async createPrivateAssetsFromBatch(ctx, batch_size) { + + if (isVerbose) { + console.log('Entering createPrivateAssetsFromBatch'); + } + + const transientContent = ctx.stub.getTransient().get('content'); + const transientData = JSON.parse(transientContent); + + for (let i=0; i
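The fixed-asset contract above writes to a collection named CollectionOne, and the playbook points chaincode deployment at collections-config.json. That file is part of this change but is not reproduced in this hunk, so the snippet below only illustrates the documented shape of a Fabric collection definition; the policy, peer counts and blockToLive are placeholders, not the values used by the benchmark:

    // Illustrative shape of a collections definition only.
    const exampleCollectionsConfig = [
        {
            name: 'CollectionOne',                           // collection used by fixed-asset.js
            policy: "OR('Org1MSP.member','Org2MSP.member')", // which orgs may hold the private data
            requiredPeerCount: 0,                            // peers that must ack dissemination
            maxPeerCount: 1,                                 // additional peers to disseminate to
            blockToLive: 0,                                  // 0 = never purge
            memberOnlyRead: true                             // only member orgs may read via chaincode
        }
    ];
    console.log(JSON.stringify(exampleCollectionsConfig, null, 2));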