diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml new file mode 100644 index 000000000..daddae876 --- /dev/null +++ b/.github/workflows/benchmark.yml @@ -0,0 +1,51 @@ +name: Benchmark tests + +on: + push: + # branches: [ master ] + pull_request: + # branches: [ master ] + +jobs: + benchmark: + runs-on: ubuntu-latest + steps: + + - name: Setup go 1.16 + uses: actions/setup-go@v2 + with: + go-version: '1.16' # The Go version to download (if necessary) and use. + + + - name: Set GITHUB_ENV + run: | + echo "BRANCH=$(echo ${GITHUB_REF#refs/heads/})" >> $GITHUB_ENV + echo "GOSDK=$(echo $(dirname $(pwd)))/gosdk" >> $GITHUB_ENV + echo "TAG=$(echo ${GITHUB_REF#refs/heads/} | sed 's/\//-/g' )" >> $GITHUB_ENV + + - name: Clone blobber + uses: actions/checkout@v2 + + # - name: Clone gosdk + # run: | + # [ -d $GOSDK ] && rm -rf $GOSDK + # git clone https://github.com/0chain/gosdk.git $GOSDK + # cd $GOSDK + # git checkout $BRANCH + + - name: Run benchmark + run: go test -tags bn256 -benchmem -run="BenchmarkUploadFile*" -bench="BenchmarkUploadFile*" ./... | tee benchmark.txt + + # Run `github-action-benchmark` action + # - name: Push benchmark report + # uses: cnlangzi/github-action-benchmark@v1 + # with: + # name: 0chain/gosdk Benchmark + # tool: 'go' + # benchmark-data-dir-path: ${{ env.TAG }}/bench + # output-file-path: benchmark.txt + # # Personal access token to deploy GitHub Pages branch + # github-token: ${{ secrets.GOSDK }} + # #github-token: ${{ secrets.PERSONAL_GITHUB_TOKEN }} + # # Push and deploy GitHub pages branch automatically + # auto-push: true \ No newline at end of file diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 0e708cf80..b03989169 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -2,6 +2,7 @@ name: CI on: push: + release: types: - published @@ -14,31 +15,76 @@ env: VALIDATOR_REGISTRY: ${{ secrets.VALIDATOR_REGISTRY }} jobs: - test: + integration_test: runs-on: ubuntu-20.04 + timeout-minutes: 40 steps: - - name: Install Go + + - name: Setup go 1.16 uses: actions/setup-go@v2 with: - go-version: 1.13.x - - uses: actions/checkout@v2 - - uses: satackey/action-docker-layer-caching@v0.0.11 - continue-on-error: true + go-version: '1.16' # The Go version to download (if necessary) and use. + + - name: Clone Blobber + uses: actions/checkout@v2 + + # - uses: satackey/action-docker-layer-caching@v0.0.11 + # continue-on-error: true + + + - name: Set GITHUB_ENV + run: | + echo "BRANCH=$(echo ${GITHUB_REF#refs/heads/})" >> $GITHUB_ENV + echo "GOSDK=$(echo $(dirname $(pwd)))/gosdk" >> $GITHUB_ENV + echo "TAG=$(echo ${GITHUB_REF#refs/heads/} | sed 's/\//-/g' )" >> $GITHUB_ENV + + # git clone gosdk with same branch + # - name: Clone gosdk + # run: | + # [ -d $GOSDK ] && rm -rf $GOSDK + # git clone https://github.com/0chain/gosdk.git $GOSDK + # cd $GOSDK + # git checkout $BRANCH + - name: Build test environment and run tests run: | sed -i '/#expose_ci_port/c\ ports:\n - "5432:5432"' ./docker.local/b0docker-compose.yml ./docker.local/bin/blobber.init.setup.sh docker network create --driver=bridge --subnet=198.18.0.0/15 --gateway=198.18.0.255 testnet0 + ./docker.local/bin/build.base.sh ./docker.local/bin/build.blobber.sh + ./docker.local/bin/build.validator.sh cd docker.local/blobber1 ../bin/blobber.start_bls.sh /dev/null & cd ../.. - make integration-tests + + go16=$(which go) + sudo $go16 test ./... 
-args integration; golangci: name: lint runs-on: ubuntu-latest steps: + - name: Setup go 1.16 + uses: actions/setup-go@v2 + with: + go-version: '1.16' # The Go version to download (if necessary) and use. - uses: actions/checkout@v2 + + + - name: Set GITHUB_ENV + run: | + echo "BRANCH=$(echo ${GITHUB_REF#refs/heads/})" >> $GITHUB_ENV + echo "GOSDK=$(echo $(dirname $(pwd)))/gosdk" >> $GITHUB_ENV + echo "TAG=$(echo ${GITHUB_REF#refs/heads/} | sed 's/\//-/g' )" >> $GITHUB_ENV + + # git clone gosdk with same branch + # - name: Clone gosdk + # run: | + # [ -d $GOSDK ] && rm -rf $GOSDK + # git clone https://github.com/0chain/gosdk.git $GOSDK + # cd $GOSDK + # git checkout $BRANCH + - name: golangci-lint uses: golangci/golangci-lint-action@v2 with: @@ -52,18 +98,19 @@ jobs: uses: actions/checkout@master with: fetch-depth: 1 + - name: setup golang if: success() uses: actions/setup-go@v2 with: - go-version: 1.13.x + go-version: 1.16.x - name: run buf commands if: success() run: make local dockerize_blobber: runs-on: ubuntu-20.04 - needs: test + needs: integration_test if: github.event_name == 'release' && github.event.action == 'published' steps: - uses: actions/checkout@v2 @@ -87,7 +134,7 @@ jobs: dockerize_validator: runs-on: ubuntu-20.04 - needs: test + needs: integration_test if: github.event_name == 'release' && github.event.action == 'published' steps: - uses: actions/checkout@v2 diff --git a/.github/workflows/dockerize.yml b/.github/workflows/dockerize.yml new file mode 100644 index 000000000..b608d9749 --- /dev/null +++ b/.github/workflows/dockerize.yml @@ -0,0 +1,219 @@ +name: Dockerize + +on: + push: + branches: + - dayi/main + - dayi/qa + - dayi/dev + pull_request: + - dayi/main + - dayi/qa + - dayi/dev + # tags: + # - '*' + + +env: + BLOBBER_REGISTRY: ${{ secrets.BLOBBER_REGISTRY }} + VALIDATOR_REGISTRY: ${{ secrets.VALIDATOR_REGISTRY }} + +jobs: + build_base: + runs-on: ubuntu-20.04 + steps: + - name: Setup go 1.16 + uses: actions/setup-go@v2 + with: + go-version: '1.16' # The Go version to download (if necessary) and use. 
+ - name: Set GITHUB_ENV + run: | + echo "BRANCH=$(echo ${GITHUB_REF#refs/heads/})" >> $GITHUB_ENV + echo "GOSDK=$(echo $(dirname $(pwd)))/gosdk" >> $GITHUB_ENV + echo "TAG=$(echo ${GITHUB_REF#refs/heads/} | sed 's/\//-/g' )" >> $GITHUB_ENV + - name: Clone blobber + uses: actions/checkout@v2 + + # - name: Clone gosdk + # run: | + # [ -d $GOSDK ] && rm -rf $GOSDK + # git clone https://github.com/0chain/gosdk.git $GOSDK + # cd $GOSDK + # git checkout $BRANCH + + + - name: Build blobber_base + run: ./docker.local/bin/build.base.sh + + - name: Export blobber_base + run: | + mkdir -p /tmp/0chain/ + docker save "blobber_base" > /tmp/0chain/blobber_base.tar + + - name: Upload blobber_base + uses: actions/upload-artifact@v2 + with: + name: blobber_base + path: /tmp/0chain/blobber_base.tar + + build_blobber: + needs: build_base + runs-on: ubuntu-20.04 + steps: + - name: Set GITHUB_ENV + run: | + echo "BRANCH=$(echo ${GITHUB_REF#refs/heads/})" >> $GITHUB_ENV + echo "GOSDK=$(echo $(dirname $(pwd)))/gosdk" >> $GITHUB_ENV + echo "TAG=$(echo ${GITHUB_REF#refs/heads/} | sed 's/\//-/g' )" >> $GITHUB_ENV + + - name: Clone blobber + uses: actions/checkout@v2 + + # - name: Clone gosdk + # run: | + # [ -d $GOSDK ] && rm -rf $GOSDK + # git clone https://github.com/0chain/gosdk.git $GOSDK + # cd $GOSDK + # git checkout $BRANCH + + - name: Download blobber_base + uses: actions/download-artifact@v2 + with: + name: blobber_base + path: /tmp/0chain + + - name: Load blobber_base + run: | + docker load --input /tmp/0chain/blobber_base.tar + + #docker image ls -a + + - name: Build blobber + run: ./docker.local/bin/build.blobber.sh + + - name: Export blobber + run: | + mkdir -p /tmp/0chain/ + docker save "blobber" > /tmp/0chain/blobber.tar + + - name: Upload blobber + uses: actions/upload-artifact@v2 + with: + name: blobber + path: /tmp/0chain/blobber.tar + + publish_blobber: + needs: build_blobber + runs-on: ubuntu-20.04 + steps: + - name: Set GITHUB_ENV + run: | + echo "BRANCH=$(echo ${GITHUB_REF#refs/heads/})" >> $GITHUB_ENV + echo "GOSDK=$(echo $(dirname $(pwd)))/gosdk" >> $GITHUB_ENV + echo "TAG=$(echo ${GITHUB_REF#refs/heads/} | sed 's/\//-/g' )" >> $GITHUB_ENV + + - name: Download blobber + uses: actions/download-artifact@v2 + with: + name: blobber + path: /tmp/0chain + + - name: Load blobber + run: | + docker load --input /tmp/0chain/blobber.tar + + - name: Login to Docker Hub + uses: docker/login-action@v1 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_PASSWORD }} + - name: Push blobber + run: | + docker tag blobber:latest ${BLOBBER_REGISTRY}:$TAG + docker push ${BLOBBER_REGISTRY}:$TAG + + SHORT_SHA=$(echo $GITHUB_SHA | head -c 8) + docker tag blobber:latest ${BLOBBER_REGISTRY}:$TAG-$SHORT_SHA + docker push ${BLOBBER_REGISTRY}:$TAG-$SHORT_SHA + + + + build_validator: + needs: build_base + runs-on: ubuntu-20.04 + steps: + - name: Set GITHUB_ENV + run: | + echo "BRANCH=$(echo ${GITHUB_REF#refs/heads/})" >> $GITHUB_ENV + echo "GOSDK=$(echo $(dirname $(pwd)))/gosdk" >> $GITHUB_ENV + echo "TAG=$(echo ${GITHUB_REF#refs/heads/} | sed 's/\//-/g' )" >> $GITHUB_ENV + + - name: Clone blobber + uses: actions/checkout@v2 + + # - name: Clone gosdk + # run: | + # [ -d $GOSDK ] && rm -rf $GOSDK + # git clone https://github.com/0chain/gosdk.git $GOSDK + # cd $GOSDK + # git checkout $BRANCH + + - name: Download blobber_base + uses: actions/download-artifact@v2 + with: + name: blobber_base + path: /tmp/0chain + + - name: Load blobber_base + run: | + docker load --input 
/tmp/0chain/blobber_base.tar + + #docker image ls -a + + - name: Build validator + run: ./docker.local/bin/build.validator.sh + + - name: Export validator + run: | + mkdir -p /tmp/0chain/ + docker save "validator" > /tmp/0chain/validator.tar + + - name: Upload validator + uses: actions/upload-artifact@v2 + with: + name: validator + path: /tmp/0chain/validator.tar + + publish_validator: + needs: build_validator + runs-on: ubuntu-20.04 + steps: + - name: Set GITHUB_ENV + run: | + echo "BRANCH=$(echo ${GITHUB_REF#refs/heads/})" >> $GITHUB_ENV + echo "GOSDK=$(echo $(dirname $(pwd)))/gosdk" >> $GITHUB_ENV + echo "TAG=$(echo ${GITHUB_REF#refs/heads/} | sed 's/\//-/g' )" >> $GITHUB_ENV + + - name: Download validator + uses: actions/download-artifact@v2 + with: + name: validator + path: /tmp/0chain + + - name: Load validator + run: | + docker load --input /tmp/0chain/validator.tar + + - name: Login to Docker Hub + uses: docker/login-action@v1 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_PASSWORD }} + - name: Push validator + run: | + docker tag validator:latest ${VALIDATOR_REGISTRY}:$TAG + docker push ${VALIDATOR_REGISTRY}:$TAG + + SHORT_SHA=$(echo $GITHUB_SHA | head -c 8) + docker tag validator:latest ${VALIDATOR_REGISTRY}:$TAG-$SHORT_SHA + docker push ${VALIDATOR_REGISTRY}:$TAG-$SHORT_SHA diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml new file mode 100644 index 000000000..ce181c9ef --- /dev/null +++ b/.github/workflows/tests.yml @@ -0,0 +1,47 @@ +name: Tests + +on: + push: + branches: + - dayi/main + - dayi/qa + - dayi/dev + pull_request: + - dayi/main + - dayi/qa + - dayi/dev + # tags: + # - '*' + +jobs: + unit_tests: + runs-on: ubuntu-20.04 + steps: + - name: Setup go 1.16 + uses: actions/setup-go@v2 + with: + go-version: '1.16' # The Go version to download (if necessary) and use. + - name: Set GITHUB_ENV + run: | + echo "BRANCH=$(echo ${GITHUB_REF#refs/heads/})" >> $GITHUB_ENV + echo "GOSDK=$(echo $(dirname $(pwd)))/gosdk" >> $GITHUB_ENV + echo "TAG=$(echo ${GITHUB_REF#refs/heads/} | sed 's/\//-/g' )" >> $GITHUB_ENV + + - name: Clone blobber + uses: actions/checkout@v2 + + # - name: Clone gosdk + # run: | + # [ -d $GOSDK ] && rm -rf $GOSDK + # git clone https://github.com/0chain/gosdk.git $GOSDK + # cd $GOSDK + # git checkout $BRANCH + + - name: Run tests + run: | + cd $GITHUB_WORKSPACE/code/go/0chain.net/ + go test -race -coverprofile=coverage.txt -covermode=atomic ./... 
+ - name: Codecov + run: | + cd $GITHUB_WORKSPACE/code/go/0chain.net/ + bash <(curl -s https://codecov.io/bash) \ No newline at end of file diff --git a/.gitignore b/.gitignore index 6c3267d34..7e5599120 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,8 @@ +coverage.txt .DS_Store .idea/ .vscode/ +__debug_bin **/client !**/client/main.go **/docker.local/blobber* @@ -9,28 +11,6 @@ **/blobber/blobber/files **/blobber/blobber/data **/pkg/ -docker.local/blobber1/data/badgerdb/blobberstate/000000.vlog -docker.local/blobber1/data/badgerdb/blobberstate/LOCK -docker.local/blobber1/files/7a7/a9b/206/177a1b97ac960fa1ed4f5d5bc925fd1821cf69d387165f402275bdf/refs/123/6e9/a3b/6c8bbc889679c68af4372b7d68e2f2343c03ae7aaa4b25fbb8257c3 -docker.local/blobber1/files/bdf/b41/6eb/0cf7bd794d8c27fd840519ceb1d82a2207270b70f3f7c70b288c517/refs/123/6e9/a3b/6c8bbc889679c68af4372b7d68e2f2343c03ae7aaa4b25fbb8257c3 -docker.local/blobber1/files/e55/003/7d3/f56519dff06156f16a19a40389414f94cfd4c3a548f46c218cb484e/objects/03d/67d/23a/b7ecc9af4f5bbfef03892ca2ec83138 -docker.local/blobber1/files/e55/003/7d3/f56519dff06156f16a19a40389414f94cfd4c3a548f46c218cb484e/objects/164/731/b66/0a01c9212d23b19c8d6d9b5ad04d95d -docker.local/blobber1/files/e55/003/7d3/f56519dff06156f16a19a40389414f94cfd4c3a548f46c218cb484e/objects/262/451/a50/c1940645c1428309a4eb50349843e33 -docker.local/blobber1/files/e55/003/7d3/f56519dff06156f16a19a40389414f94cfd4c3a548f46c218cb484e/objects/29a/f44/4b3/acad044ba91d6c6b2bff2f5156c6d36 -docker.local/blobber1/files/e55/003/7d3/f56519dff06156f16a19a40389414f94cfd4c3a548f46c218cb484e/objects/484/7a7/d69/b0d4e90ed6c3ae8ea9e07afbda4dfa9 -docker.local/blobber1/files/e55/003/7d3/f56519dff06156f16a19a40389414f94cfd4c3a548f46c218cb484e/objects/530/aaa/fb4/749ba793413bfd63bfa681d1ef76843 -docker.local/blobber1/files/e55/003/7d3/f56519dff06156f16a19a40389414f94cfd4c3a548f46c218cb484e/objects/740/6ce/091/6703b9620afdd7a568d42fc2980f02d -docker.local/blobber1/files/e55/003/7d3/f56519dff06156f16a19a40389414f94cfd4c3a548f46c218cb484e/objects/873/b24/50a/26cf8f5c6e7fc0435351272603df08f -docker.local/blobber1/files/e55/003/7d3/f56519dff06156f16a19a40389414f94cfd4c3a548f46c218cb484e/objects/967/e7d/1f1/f472f222c60e31ce5a95ca92749d7d7 -docker.local/blobber1/files/e55/003/7d3/f56519dff06156f16a19a40389414f94cfd4c3a548f46c218cb484e/objects/a66/228/e80/db0cbfd58778557fb6592da86837e3e -docker.local/blobber1/files/e55/003/7d3/f56519dff06156f16a19a40389414f94cfd4c3a548f46c218cb484e/objects/abc/954/6e7/687b12ef616ded07546fea91ba73042 -docker.local/blobber1/files/e55/003/7d3/f56519dff06156f16a19a40389414f94cfd4c3a548f46c218cb484e/objects/bdd/772/f67/d87ee8bf3560d724ea030bf49790156 -docker.local/blobber1/files/e55/003/7d3/f56519dff06156f16a19a40389414f94cfd4c3a548f46c218cb484e/objects/c19/186/b15/8a7f8c38530fec679cbdad1e43b1a3c -docker.local/blobber1/files/e55/003/7d3/f56519dff06156f16a19a40389414f94cfd4c3a548f46c218cb484e/objects/c2b/e1e/7c1/baec0d6299391c115b39187b249c15a -docker.local/blobber1/files/e55/003/7d3/f56519dff06156f16a19a40389414f94cfd4c3a548f46c218cb484e/objects/dd5/f91/f9f/2fb2b6c93e400f3cdc1e29654bb397d -docker.local/blobber1/files/e55/003/7d3/f56519dff06156f16a19a40389414f94cfd4c3a548f46c218cb484e/objects/f4c/044/59a/1c3f87be1184b5b495592408f6708fe -docker.local/blobber1/files/e55/003/7d3/f56519dff06156f16a19a40389414f94cfd4c3a548f46c218cb484e/refs/123/6e9/a3b/6c8bbc889679c68af4372b7d68e2f2343c03ae7aaa4b25fbb8257c3 
-docker.local/blobber1/files/e55/003/7d3/f56519dff06156f16a19a40389414f94cfd4c3a548f46c218cb484e/refs/388/12d/bd3/b084ff297706d7bd307996c3363c2df53acc49c97b83201f8dc7bd9
-docker.local/blobber1/data/badgerdb/blobberstate/MANIFEST
+dev.local/data
+out/
-dev.local/data
\ No newline at end of file
diff --git a/Makefile b/Makefile
index 5c9dc25ed..aa62d38b0 100644
--- a/Makefile
+++ b/Makefile
@@ -19,9 +19,10 @@ test:
 lint:
     golangci-lint run --timeout 2m0s;
+
 .PHONY: integration-tests
 integration-tests:
-    sudo go test ./... -args integration;
+    go test ./... -args integration;
diff --git a/README.md b/README.md
index d5281f6d8..53e642a23 100644
--- a/README.md
+++ b/README.md
@@ -21,6 +21,8 @@
 - [Run blobber on ec2 / vm / bare metal over https](https://github.com/0chain/blobber/blob/master/https/README.md)
 
+- [Blobber local development guidelines](dev.local/README.md)
+
 ## Initial Setup
 
diff --git a/code/go/0chain.net/blobber/.gitignore b/code/go/0chain.net/blobber/.gitignore
new file mode 100644
index 000000000..9e5fb362b
--- /dev/null
+++ b/code/go/0chain.net/blobber/.gitignore
@@ -0,0 +1,2 @@
+blobber
+/config
\ No newline at end of file
diff --git a/code/go/0chain.net/blobber/main.go b/code/go/0chain.net/blobber/main.go
index e89aebf01..f5ae1f061 100644
--- a/code/go/0chain.net/blobber/main.go
+++ b/code/go/0chain.net/blobber/main.go
@@ -350,11 +350,13 @@ func main() {
     portString := flag.String("port", "", "port")
     grpcPortString := flag.String("grpc_port", "", "grpc_port")
     hostname := flag.String("hostname", "", "hostname")
+    configDir := flag.String("config_dir", "./config", "config_dir")
     flag.Parse()
 
     config.SetupDefaultConfig()
-    config.SetupConfig("./config")
+
+    config.SetupConfig(*configDir)
 
     config.Configuration.DeploymentMode = byte(*deploymentMode)
 
diff --git a/code/go/0chain.net/blobbercore/allocation/allocationchange.go b/code/go/0chain.net/blobbercore/allocation/allocationchange.go
index 554cf3151..e7bc51ebc 100644
--- a/code/go/0chain.net/blobbercore/allocation/allocationchange.go
+++ b/code/go/0chain.net/blobbercore/allocation/allocationchange.go
@@ -8,21 +8,12 @@ import (
     "github.com/0chain/blobber/code/go/0chain.net/blobbercore/reference"
     "github.com/0chain/blobber/code/go/0chain.net/core/common"
     "github.com/0chain/blobber/code/go/0chain.net/core/logging"
+    "github.com/0chain/gosdk/constants"
 
     "go.uber.org/zap"
     "gorm.io/gorm"
 )
 
-const (
-    INSERT_OPERATION       = "insert"
-    DELETE_OPERATION       = "delete"
-    UPDATE_OPERATION       = "update"
-    RENAME_OPERATION       = "rename"
-    COPY_OPERATION         = "copy"
-    UPDATE_ATTRS_OPERATION = "update_attrs"
-    CREATEDIR_OPERATION    = "createdir"
-)
-
 const (
     NewConnection        = 0
     InProgressConnection = 1
@@ -32,6 +23,7 @@ const (
 
 var OperationNotApplicable = common.NewError("operation_not_valid", "Not an applicable operation")
 
+// AllocationChangeProcessor is the transaction of a file operation request. It is persisted in postgres and can be rebuilt for a later http request (e.g. CommitHandler)
 type AllocationChangeProcessor interface {
     CommitToFileStore(ctx context.Context) error
     DeleteTempFile() error
@@ -68,22 +60,31 @@ func (AllocationChange) TableName() string {
     return "allocation_changes"
 }
 
+func (change *AllocationChange) Save(ctx context.Context) error {
+    db := datastore.GetStore().GetTransaction(ctx)
+
+    return db.Save(change).Error
+}
+
+// GetAllocationChanges reloads the connection's changes on an allocation from postgres.
+// 1. update the connection's status to NewConnection if connection_id is not found in postgres
+// 2. 
mark as NewConnection if connection_id is marked as DeleteConnection func GetAllocationChanges(ctx context.Context, connectionID string, allocationID string, clientID string) (*AllocationChangeCollector, error) { cc := &AllocationChangeCollector{} db := datastore.GetStore().GetTransaction(ctx) - err := db.Where(&AllocationChangeCollector{ - ConnectionID: connectionID, - AllocationID: allocationID, - ClientID: clientID, - }).Not(&AllocationChangeCollector{ - Status: DeletedConnection, - }).Preload("Changes").First(cc).Error + err := db.Where("connection_id = ? and allocation_id = ? and client_id = ? and status <> ?", + connectionID, + allocationID, + clientID, + DeletedConnection, + ).Preload("Changes").First(cc).Error if err == nil { cc.ComputeProperties() return cc, nil } + // It is a bug when connetion_id was marked as DeletedConnection if errors.Is(err, gorm.ErrRecordNotFound) { cc.ConnectionID = connectionID cc.AllocationID = allocationID @@ -105,31 +106,31 @@ func (cc *AllocationChangeCollector) Save(ctx context.Context) error { db := datastore.GetStore().GetTransaction(ctx) if cc.Status == NewConnection { cc.Status = InProgressConnection - err := db.Create(cc).Error - return err - } else { - err := db.Save(cc).Error - return err + return db.Create(cc).Error } + + return db.Save(cc).Error } +// ComputeProperties unmarshal all ChangeProcesses from postgres func (cc *AllocationChangeCollector) ComputeProperties() { cc.AllocationChanges = make([]AllocationChangeProcessor, 0, len(cc.Changes)) for _, change := range cc.Changes { var acp AllocationChangeProcessor switch change.Operation { - case INSERT_OPERATION: + case constants.FileOperationInsert: acp = new(NewFileChange) - case UPDATE_OPERATION: - acp = new(UpdateFileChange) - case DELETE_OPERATION: + case constants.FileOperationUpdate: + acp = new(UpdateFileChanger) + case constants.FileOperationDelete: acp = new(DeleteFileChange) - case RENAME_OPERATION: + case constants.FileOperationRename: acp = new(RenameFileChange) - case COPY_OPERATION: + case constants.FileOperationCopy: acp = new(CopyFileChange) - case UPDATE_ATTRS_OPERATION: + case constants.FileOperationUpdateAttrs: acp = new(AttributesChange) + } if acp == nil { diff --git a/code/go/0chain.net/blobbercore/allocation/entity.go b/code/go/0chain.net/blobbercore/allocation/entity.go index 463b65239..6196ead9b 100644 --- a/code/go/0chain.net/blobbercore/allocation/entity.go +++ b/code/go/0chain.net/blobbercore/allocation/entity.go @@ -19,27 +19,28 @@ const ( ) type Allocation struct { - ID string `gorm:"column:id;primary_key"` - Tx string `gorm:"column:tx"` - TotalSize int64 `gorm:"column:size"` - UsedSize int64 `gorm:"column:used_size"` - OwnerID string `gorm:"column:owner_id"` - OwnerPublicKey string `gorm:"column:owner_public_key"` - RepairerID string `gorm:"column:repairer_id"`// experimental / blobber node id - PayerID string `gorm:"column:payer_id"` // optional / client paying for all r/w ops - Expiration common.Timestamp `gorm:"column:expiration_date"` - AllocationRoot string `gorm:"column:allocation_root"` - BlobberSize int64 `gorm:"column:blobber_size"` - BlobberSizeUsed int64 `gorm:"column:blobber_size_used"` - LatestRedeemedWM string `gorm:"column:latest_redeemed_write_marker"` - IsRedeemRequired bool `gorm:"column:is_redeem_required"` - TimeUnit time.Duration `gorm:"column:time_unit"` - IsImmutable bool `gorm:"is_immutable"` + ID string `gorm:"column:id;primary_key"` + Tx string `gorm:"column:tx"` + TotalSize int64 `gorm:"column:size"` + UsedSize int64 
`gorm:"column:used_size"` + OwnerID string `gorm:"column:owner_id"` + OwnerPublicKey string `gorm:"column:owner_public_key"` + RepairerID string `gorm:"column:repairer_id"` // experimental / blobber node id + PayerID string `gorm:"column:payer_id"` // optional / client paying for all r/w ops + Expiration common.Timestamp `gorm:"column:expiration_date"` + // AllocationRoot allcation_root of last write_marker + AllocationRoot string `gorm:"column:allocation_root"` + BlobberSize int64 `gorm:"column:blobber_size"` + BlobberSizeUsed int64 `gorm:"column:blobber_size_used"` + LatestRedeemedWM string `gorm:"column:latest_redeemed_write_marker"` + IsRedeemRequired bool `gorm:"column:is_redeem_required"` + TimeUnit time.Duration `gorm:"column:time_unit"` + IsImmutable bool `gorm:"is_immutable"` // Ending and cleaning - CleanedUp bool `gorm:"column:cleaned_up"` - Finalized bool `gorm:"column:finalized"` + CleanedUp bool `gorm:"column:cleaned_up"` + Finalized bool `gorm:"column:finalized"` // Has many terms - Terms []*Terms `gorm:"-"` + Terms []*Terms `gorm:"-"` } func (Allocation) TableName() string { diff --git a/code/go/0chain.net/blobbercore/allocation/file_changer.go b/code/go/0chain.net/blobbercore/allocation/file_changer.go new file mode 100644 index 000000000..705aef90b --- /dev/null +++ b/code/go/0chain.net/blobbercore/allocation/file_changer.go @@ -0,0 +1,22 @@ +package allocation + +import ( + "context" + + "github.com/0chain/blobber/code/go/0chain.net/blobbercore/reference" +) + +// FileChanger file change processor +type FileChanger interface { + // ProcessChange process change and save them on reference_objects + ProcessChange(ctx context.Context, + change *AllocationChange, allocationRoot string) (*reference.Ref, error) + // Marshal marshal change as JSON string + Marshal() (string, error) + // Unmarshal unmarshal change from JSON string + Unmarshal(input string) error + // DeleteTempFile delete temp file and thumbnail from disk + DeleteTempFile() error + // CommitToFileStore move temp file and thumbnail from temp dir to persistent folder + CommitToFileStore(ctx context.Context) error +} diff --git a/code/go/0chain.net/blobbercore/allocation/file_changer_add.go b/code/go/0chain.net/blobbercore/allocation/file_changer_add.go new file mode 100644 index 000000000..cd6dcc249 --- /dev/null +++ b/code/go/0chain.net/blobbercore/allocation/file_changer_add.go @@ -0,0 +1,117 @@ +package allocation + +import ( + "context" + "encoding/json" + "path/filepath" + "strings" + + "github.com/0chain/blobber/code/go/0chain.net/blobbercore/reference" + "github.com/0chain/blobber/code/go/0chain.net/blobbercore/stats" + "github.com/0chain/blobber/code/go/0chain.net/blobbercore/util" + + "github.com/0chain/blobber/code/go/0chain.net/core/common" +) + +// AddFileChanger file change processor for continuous upload in INIT/APPEND/FINALIZE +type AddFileChanger struct { + BaseFileChanger +} + +// ProcessChange update references, and create a new FileRef +func (nf *AddFileChanger) ProcessChange(ctx context.Context, + change *AllocationChange, allocationRoot string) (*reference.Ref, error) { + + path, _ := filepath.Split(nf.Path) + path = filepath.Clean(path) + tSubDirs := reference.GetSubDirsFromPath(path) + + rootRef, err := reference.GetReferencePath(ctx, nf.AllocationID, nf.Path) + if err != nil { + return nil, err + } + + dirRef := rootRef + treelevel := 0 + for { + found := false + for _, child := range dirRef.Children { + if child.Type == reference.DIRECTORY && treelevel < len(tSubDirs) { + if child.Name == 
tSubDirs[treelevel] { + dirRef = child + found = true + break + } + } + } + if found { + treelevel++ + continue + } + if len(tSubDirs) > treelevel { + newRef := reference.NewDirectoryRef() + newRef.AllocationID = dirRef.AllocationID + newRef.Path = "/" + strings.Join(tSubDirs[:treelevel+1], "/") + newRef.ParentPath = "/" + strings.Join(tSubDirs[:treelevel], "/") + newRef.Name = tSubDirs[treelevel] + newRef.LookupHash = reference.GetReferenceLookup(dirRef.AllocationID, newRef.Path) + dirRef.AddChild(newRef) + dirRef = newRef + treelevel++ + continue + } else { + break + } + } + + var newFile = reference.NewFileRef() + newFile.ActualFileHash = nf.ActualHash + newFile.ActualFileSize = nf.ActualSize + newFile.AllocationID = dirRef.AllocationID + newFile.ContentHash = nf.Hash + newFile.CustomMeta = nf.CustomMeta + newFile.MerkleRoot = nf.MerkleRoot + newFile.Name = nf.Filename + newFile.ParentPath = dirRef.Path + newFile.Path = nf.Path + newFile.LookupHash = reference.GetReferenceLookup(dirRef.AllocationID, nf.Path) + newFile.Size = nf.Size + newFile.MimeType = nf.MimeType + newFile.WriteMarker = allocationRoot + newFile.ThumbnailHash = nf.ThumbnailHash + newFile.ThumbnailSize = nf.ThumbnailSize + newFile.ActualThumbnailHash = nf.ActualThumbnailHash + newFile.ActualThumbnailSize = nf.ActualThumbnailSize + newFile.EncryptedKey = nf.EncryptedKey + newFile.ChunkSize = nf.ChunkSize + + if err = newFile.SetAttributes(&nf.Attributes); err != nil { + return nil, common.NewErrorf("process_new_file_change", + "setting file attributes: %v", err) + } + + dirRef.AddChild(newFile) + if _, err := rootRef.CalculateHash(ctx, true); err != nil { + return nil, err + } + stats.NewFileCreated(ctx, newFile.ID) + return rootRef, nil +} + +// Marshal marshal and change to persistent to postgres +func (nf *AddFileChanger) Marshal() (string, error) { + ret, err := json.Marshal(nf) + if err != nil { + return "", err + } + return string(ret), nil +} + +// Unmarshal reload and unmarshal change from allocation_changes.input on postgres +func (nf *AddFileChanger) Unmarshal(input string) error { + if err := json.Unmarshal([]byte(input), nf); err != nil { + return err + } + + return util.UnmarshalValidation(nf) +} diff --git a/code/go/0chain.net/blobbercore/allocation/file_changer_base.go b/code/go/0chain.net/blobbercore/allocation/file_changer_base.go new file mode 100644 index 000000000..7fa347854 --- /dev/null +++ b/code/go/0chain.net/blobbercore/allocation/file_changer_base.go @@ -0,0 +1,91 @@ +package allocation + +import ( + "context" + + "github.com/0chain/blobber/code/go/0chain.net/blobbercore/filestore" + "github.com/0chain/blobber/code/go/0chain.net/blobbercore/reference" + "github.com/0chain/blobber/code/go/0chain.net/core/common" +) + +// BaseFileChanger base file change processor +type BaseFileChanger struct { + //client side: unmarshal them from 'updateMeta'/'uploadMeta' + ConnectionID string `json:"connection_id" validation:"required"` + //client side: + Filename string `json:"filename" validation:"required"` + //client side: + Path string `json:"filepath" validation:"required"` + //client side: + ActualHash string `json:"actual_hash,omitempty" validation:"required"` + //client side: + ActualSize int64 `json:"actual_size,omitempty" validation:"required"` + //client side: + ActualThumbnailSize int64 `json:"actual_thumb_size"` + //client side: + ActualThumbnailHash string `json:"actual_thumb_hash"` + //client side: + MimeType string `json:"mimetype,omitempty"` + //client side: + Attributes reference.Attributes 
`json:"attributes,omitempty"` + //client side: + MerkleRoot string `json:"merkle_root,omitempty"` + + //server side: update them by ChangeProcessor + AllocationID string `json:"allocation_id"` + //client side: + Hash string `json:"content_hash,omitempty"` + Size int64 `json:"size"` + //server side: + ThumbnailHash string `json:"thumbnail_content_hash,omitempty"` + ThumbnailSize int64 `json:"thumbnail_size"` + ThumbnailFilename string `json:"thumbnail_filename"` + + EncryptedKey string `json:"encrypted_key,omitempty"` + CustomMeta string `json:"custom_meta,omitempty"` + + ChunkSize int64 `json:"chunk_size,omitempty"` // the size of achunk. 64*1024 is default + IsFinal bool `json:"is_final,omitempty"` // current chunk is last or not + + ChunkIndex int `json:"chunk_index,omitempty"` // the seq of current chunk. all chunks MUST be uploaded one by one because of CompactMerkleTree + ChunkHash string `json:"chunk_hash,omitempty"` + UploadOffset int64 `json:"upload_offset,omitempty"` // It is next position that new incoming chunk should be append to +} + +func (nf *BaseFileChanger) DeleteTempFile() error { + fileInputData := &filestore.FileInputData{} + fileInputData.Name = nf.Filename + fileInputData.Path = nf.Path + fileInputData.Hash = nf.Hash + err := filestore.GetFileStore().DeleteTempFile(nf.AllocationID, fileInputData, nf.ConnectionID) + if nf.ThumbnailSize > 0 { + fileInputData := &filestore.FileInputData{} + fileInputData.Name = nf.ThumbnailFilename + fileInputData.Path = nf.Path + fileInputData.Hash = nf.ThumbnailHash + err = filestore.GetFileStore().DeleteTempFile(nf.AllocationID, fileInputData, nf.ConnectionID) + } + return err +} + +func (nfch *BaseFileChanger) CommitToFileStore(ctx context.Context) error { + fileInputData := &filestore.FileInputData{} + fileInputData.Name = nfch.Filename + fileInputData.Path = nfch.Path + fileInputData.Hash = nfch.Hash + _, err := filestore.GetFileStore().CommitWrite(nfch.AllocationID, fileInputData, nfch.ConnectionID) + if err != nil { + return common.NewError("file_store_error", "Error committing to file store. "+err.Error()) + } + if nfch.ThumbnailSize > 0 { + fileInputData := &filestore.FileInputData{} + fileInputData.Name = nfch.ThumbnailFilename + fileInputData.Path = nfch.Path + fileInputData.Hash = nfch.ThumbnailHash + _, err := filestore.GetFileStore().CommitWrite(nfch.AllocationID, fileInputData, nfch.ConnectionID) + if err != nil { + return common.NewError("file_store_error", "Error committing thumbnail to file store. 
"+err.Error()) + } + } + return nil +} diff --git a/code/go/0chain.net/blobbercore/allocation/updatefilechange.go b/code/go/0chain.net/blobbercore/allocation/file_changer_update.go similarity index 57% rename from code/go/0chain.net/blobbercore/allocation/updatefilechange.go rename to code/go/0chain.net/blobbercore/allocation/file_changer_update.go index d06006f12..0119e9bb7 100644 --- a/code/go/0chain.net/blobbercore/allocation/updatefilechange.go +++ b/code/go/0chain.net/blobbercore/allocation/file_changer_update.go @@ -5,7 +5,6 @@ import ( "encoding/json" "path/filepath" - "github.com/0chain/blobber/code/go/0chain.net/blobbercore/filestore" "github.com/0chain/blobber/code/go/0chain.net/blobbercore/reference" "github.com/0chain/blobber/code/go/0chain.net/blobbercore/stats" "github.com/0chain/blobber/code/go/0chain.net/blobbercore/util" @@ -15,11 +14,11 @@ import ( "go.uber.org/zap" ) -type UpdateFileChange struct { - NewFileChange +type UpdateFileChanger struct { + BaseFileChanger } -func (nf *UpdateFileChange) ProcessChange(ctx context.Context, change *AllocationChange, allocationRoot string) (*reference.Ref, error) { +func (nf *UpdateFileChanger) ProcessChange(ctx context.Context, change *AllocationChange, allocationRoot string) (*reference.Ref, error) { path, _ := filepath.Split(nf.Path) path = filepath.Clean(path) @@ -74,6 +73,7 @@ func (nf *UpdateFileChange) ProcessChange(ctx context.Context, change *Allocatio existingRef.ActualThumbnailHash = nf.ActualThumbnailHash existingRef.ActualThumbnailSize = nf.ActualThumbnailSize existingRef.EncryptedKey = nf.EncryptedKey + existingRef.ChunkSize = nf.ChunkSize if err = existingRef.SetAttributes(&nf.Attributes); err != nil { return nil, common.NewErrorf("process_update_file_change", @@ -85,7 +85,7 @@ func (nf *UpdateFileChange) ProcessChange(ctx context.Context, change *Allocatio return rootRef, err } -func (nf *UpdateFileChange) Marshal() (string, error) { +func (nf *UpdateFileChanger) Marshal() (string, error) { ret, err := json.Marshal(nf) if err != nil { return "", err @@ -93,48 +93,10 @@ func (nf *UpdateFileChange) Marshal() (string, error) { return string(ret), nil } -func (nf *UpdateFileChange) Unmarshal(input string) error { +func (nf *UpdateFileChanger) Unmarshal(input string) error { if err := json.Unmarshal([]byte(input), nf); err != nil { return err } return util.UnmarshalValidation(nf) } - -func (nf *UpdateFileChange) DeleteTempFile() error { - fileInputData := &filestore.FileInputData{} - fileInputData.Name = nf.Filename - fileInputData.Path = nf.Path - fileInputData.Hash = nf.Hash - err := filestore.GetFileStore().DeleteTempFile(nf.AllocationID, fileInputData, nf.ConnectionID) - if nf.ThumbnailSize > 0 { - fileInputData := &filestore.FileInputData{} - fileInputData.Name = nf.ThumbnailFilename - fileInputData.Path = nf.Path - fileInputData.Hash = nf.ThumbnailHash - err = filestore.GetFileStore().DeleteTempFile(nf.AllocationID, fileInputData, nf.ConnectionID) - } - return err -} - -func (nfch *UpdateFileChange) CommitToFileStore(ctx context.Context) error { - fileInputData := &filestore.FileInputData{} - fileInputData.Name = nfch.Filename - fileInputData.Path = nfch.Path - fileInputData.Hash = nfch.Hash - _, err := filestore.GetFileStore().CommitWrite(nfch.AllocationID, fileInputData, nfch.ConnectionID) - if err != nil { - return common.NewError("file_store_error", "Error committing to file store. 
"+err.Error()) - } - if nfch.ThumbnailSize > 0 { - fileInputData := &filestore.FileInputData{} - fileInputData.Name = nfch.ThumbnailFilename - fileInputData.Path = nfch.Path - fileInputData.Hash = nfch.ThumbnailHash - _, err := filestore.GetFileStore().CommitWrite(nfch.AllocationID, fileInputData, nfch.ConnectionID) - if err != nil { - return common.NewError("file_store_error", "Error committing to file store. "+err.Error()) - } - } - return nil -} diff --git a/code/go/0chain.net/blobbercore/allocation/newfilechange.go b/code/go/0chain.net/blobbercore/allocation/newfilechange.go index f3761f158..23d33fbe8 100644 --- a/code/go/0chain.net/blobbercore/allocation/newfilechange.go +++ b/code/go/0chain.net/blobbercore/allocation/newfilechange.go @@ -11,36 +11,45 @@ import ( "github.com/0chain/blobber/code/go/0chain.net/blobbercore/stats" "github.com/0chain/blobber/code/go/0chain.net/blobbercore/util" "github.com/0chain/blobber/code/go/0chain.net/core/common" + "github.com/0chain/gosdk/constants" ) type NewFileChange struct { - ConnectionID string `json:"connection_id" validation:"required"` - AllocationID string `json:"allocation_id"` - Filename string `json:"filename" validation:"required"` - ThumbnailFilename string `json:"thumbnail_filename"` - Path string `json:"filepath" validation:"required"` - Size int64 `json:"size"` - Hash string `json:"content_hash,omitempty"` - ThumbnailSize int64 `json:"thumbnail_size"` - ThumbnailHash string `json:"thumbnail_content_hash,omitempty"` - MerkleRoot string `json:"merkle_root,omitempty"` - ActualHash string `json:"actual_hash,omitempty" validation:"required"` - ActualSize int64 `json:"actual_size,omitempty" validation:"required"` - ActualThumbnailSize int64 `json:"actual_thumb_size"` - ActualThumbnailHash string `json:"actual_thumb_hash"` - MimeType string `json:"mimetype,omitempty"` - EncryptedKey string `json:"encrypted_key,omitempty"` - CustomMeta string `json:"custom_meta,omitempty"` - Attributes reference.Attributes `json:"attributes,omitempty"` - - // IsResumable the request is resumable upload - IsResumable bool `json:"is_resumable,omitempty"` - // UploadLength indicates the size of the entire upload in bytes. The value MUST be a non-negative integer. - UploadLength int64 `json:"upload_length,omitempty"` - // Upload-Offset indicates a byte offset within a resource. The value MUST be a non-negative integer. 
- UploadOffset int64 `json:"upload_offset,omitempty"`
- // IsFinal the request is final chunk
- IsFinal bool `json:"is_final,omitempty"`
+ //client side: unmarshal them from 'updateMeta'/'uploadMeta'
+ ConnectionID string `json:"connection_id" validation:"required"`
+ //client side:
+ Filename string `json:"filename" validation:"required"`
+ //client side:
+ Path string `json:"filepath" validation:"required"`
+ //client side:
+ ActualHash string `json:"actual_hash,omitempty" validation:"required"`
+ //client side:
+ ActualSize int64 `json:"actual_size,omitempty" validation:"required"`
+ //client side:
+ ActualThumbnailSize int64 `json:"actual_thumb_size"`
+ //client side:
+ ActualThumbnailHash string `json:"actual_thumb_hash"`
+ //client side:
+ MimeType string `json:"mimetype,omitempty"`
+ //client side:
+ Attributes reference.Attributes `json:"attributes,omitempty"`
+ //client side:
+ MerkleRoot string `json:"merkle_root,omitempty"`
+
+ //server side: update them by ChangeProcessor
+ AllocationID string `json:"allocation_id"`
+ //client side:
+ Hash string `json:"content_hash,omitempty"`
+ Size int64 `json:"size"`
+ //server side:
+ ThumbnailHash string `json:"thumbnail_content_hash,omitempty"`
+ ThumbnailSize int64 `json:"thumbnail_size"`
+ ThumbnailFilename string `json:"thumbnail_filename"`
+
+ EncryptedKey string `json:"encrypted_key,omitempty"`
+ CustomMeta string `json:"custom_meta,omitempty"`
+
+ ChunkSize int64 `json:"chunk_size,omitempty"` // the size of a chunk. 64*1024 is the default
 }
 
 func (nf *NewFileChange) CreateDir(ctx context.Context, allocationID, dirName, allocationRoot string) (*reference.Ref, error) {
@@ -107,7 +116,7 @@ func (nf *NewFileChange) CreateDir(ctx context.Context, allocationID, dirName, a
 func (nf *NewFileChange) ProcessChange(ctx context.Context, change *AllocationChange, allocationRoot string) (*reference.Ref, error) {
-    if change.Operation == CREATEDIR_OPERATION {
+    if change.Operation == constants.FileOperationCreateDir {
         err := nf.Unmarshal(change.Input)
         if err != nil {
             return nil, err
@@ -177,6 +186,7 @@ func (nf *NewFileChange) ProcessChange(ctx context.Context,
     newFile.ActualThumbnailHash = nf.ActualThumbnailHash
     newFile.ActualThumbnailSize = nf.ActualThumbnailSize
     newFile.EncryptedKey = nf.EncryptedKey
+    newFile.ChunkSize = nf.ChunkSize
 
     if err = newFile.SetAttributes(&nf.Attributes); err != nil {
         return nil, common.NewErrorf("process_new_file_change",
diff --git a/code/go/0chain.net/blobbercore/allocation/protocol.go b/code/go/0chain.net/blobbercore/allocation/protocol.go
index 5bd71ca5e..25d03d9bb 100644
--- a/code/go/0chain.net/blobbercore/allocation/protocol.go
+++ b/code/go/0chain.net/blobbercore/allocation/protocol.go
@@ -49,6 +49,7 @@ func (a *Allocation) LoadTerms(ctx context.Context) (err error) {
     return // found in DB
 }
 
+// VerifyAllocationTransaction tries to get the allocation from postgres. If it doesn't exist, it is fetched from the sharders and inserted into postgres. 
 func VerifyAllocationTransaction(ctx context.Context, allocationTx string, readonly bool) (a *Allocation, err error) {
diff --git a/code/go/0chain.net/blobbercore/allocation/workers.go b/code/go/0chain.net/blobbercore/allocation/workers.go
index e883f262a..b57ef59fc 100644
--- a/code/go/0chain.net/blobbercore/allocation/workers.go
+++ b/code/go/0chain.net/blobbercore/allocation/workers.go
@@ -11,6 +11,7 @@ import (
     "github.com/0chain/blobber/code/go/0chain.net/core/common"
     "github.com/0chain/blobber/code/go/0chain.net/core/lock"
     "github.com/0chain/blobber/code/go/0chain.net/core/transaction"
+    "github.com/0chain/gosdk/constants"
     "github.com/0chain/gosdk/zboxcore/zboxutil"
 
     "gorm.io/gorm"
@@ -73,6 +74,12 @@ func waitOrQuit(ctx context.Context, d time.Duration) (quit bool) {
 
 func updateWork(ctx context.Context) {
+    defer func() {
+        if r := recover(); r != nil {
+            Logger.Error("[recover] updateWork", zap.Any("err", r))
+        }
+    }()
+
     var (
         allocs []*Allocation
         count  int64
@@ -364,7 +371,7 @@ func deleteFile(ctx context.Context, path string,
     change.ConnectionID = conn.ConnectionID
     change.Size = 0 - deleteSize
-    change.Operation = DELETE_OPERATION
+    change.Operation = constants.FileOperationDelete
 
     var dfc = &DeleteFileChange{
         ConnectionID: conn.ConnectionID,
diff --git a/code/go/0chain.net/blobbercore/challenge/challenge.go b/code/go/0chain.net/blobbercore/challenge/challenge.go
new file mode 100644
index 000000000..c2c948a62
--- /dev/null
+++ b/code/go/0chain.net/blobbercore/challenge/challenge.go
@@ -0,0 +1,138 @@
+package challenge
+
+import (
+    "bytes"
+    "context"
+    "encoding/json"
+    "errors"
+
+    "github.com/0chain/blobber/code/go/0chain.net/blobbercore/config"
+    "github.com/0chain/blobber/code/go/0chain.net/blobbercore/datastore"
+    "github.com/0chain/blobber/code/go/0chain.net/core/chain"
+    "github.com/0chain/blobber/code/go/0chain.net/core/node"
+    "github.com/0chain/blobber/code/go/0chain.net/core/transaction"
+    "github.com/remeh/sizedwaitgroup"
+    "go.uber.org/zap"
+    "gorm.io/gorm"
+
+    "github.com/0chain/blobber/code/go/0chain.net/core/logging"
+)
+
+type BCChallengeResponse struct {
+    BlobberID string `json:"blobber_id"`
+    Challenges []*ChallengeEntity `json:"challenges"`
+}
+
+// syncChallenges gets open challenges from the blockchain and adds them to the database
+func syncChallenges(ctx context.Context) {
+    defer func() {
+        if r := recover(); r != nil {
+            logging.Logger.Error("[recover] syncChallenges", zap.Any("err", r))
+        }
+    }()
+
+    params := make(map[string]string)
+    params["blobber"] = node.Self.ID
+
+    var blobberChallenges BCChallengeResponse
+    blobberChallenges.Challenges = make([]*ChallengeEntity, 0)
+    retBytes, err := transaction.MakeSCRestAPICall(transaction.STORAGE_CONTRACT_ADDRESS, "/openchallenges", params, chain.GetServerChain())
+
+    if err != nil {
+        logging.Logger.Error("Error getting the open challenges from the blockchain", zap.Error(err))
+    } else {
+
+        bytesReader := bytes.NewBuffer(retBytes)
+
+        d := json.NewDecoder(bytesReader)
+        d.UseNumber()
+        errd := d.Decode(&blobberChallenges)
+
+        if errd != nil {
+            logging.Logger.Error("Error in unmarshal of the sharder response", zap.Error(errd))
+        } else {
+            for _, challengeObj := range blobberChallenges.Challenges {
+                if challengeObj == nil || len(challengeObj.ChallengeID) == 0 {
+                    logging.Logger.Info("No challenge entity from the challenge map")
+                    continue
+                }
+
+                tx := datastore.GetStore().CreateTransaction(ctx)
+                db := datastore.GetStore().GetTransaction(tx)
+                _, err := GetChallengeEntity(tx, challengeObj.ChallengeID)
+
+                // challenge 
is not synced in db yet + if errors.Is(err, gorm.ErrRecordNotFound) { + + latestChallenge, err := GetLastChallengeEntity(tx) + + if err != nil { + if !errors.Is(err, gorm.ErrRecordNotFound) { + logging.Logger.Info("Error in load challenge entity from database ", zap.Error(err)) + continue + } + } + + isFirstChallengeInDatabase := len(challengeObj.PrevChallengeID) == 0 || latestChallenge == nil + isNextChallengeOnChain := latestChallenge == nil || latestChallenge.ChallengeID == challengeObj.PrevChallengeID + + if isFirstChallengeInDatabase || isNextChallengeOnChain { + logging.Logger.Info("Adding new challenge found from blockchain", zap.String("challenge", challengeObj.ChallengeID)) + challengeObj.Status = Accepted + if err := challengeObj.Save(tx); err != nil { + logging.Logger.Error("ChallengeEntity_Save", zap.String("challenge_id", challengeObj.ChallengeID), zap.Error(err)) + } + } else { + logging.Logger.Error("Challenge chain is not valid") + } + + } + db.Commit() + tx.Done() + } + } + + } +} + +// processChallenges read and process challenges from db +func processChallenges(ctx context.Context) { + defer func() { + if r := recover(); r != nil { + logging.Logger.Error("[recover] processChallenges", zap.Any("err", r)) + } + }() + rctx := datastore.GetStore().CreateTransaction(ctx) + db := datastore.GetStore().GetTransaction(rctx) + openchallenges := make([]*ChallengeEntity, 0) + db.Where(ChallengeEntity{Status: Accepted}).Find(&openchallenges) + if len(openchallenges) > 0 { + swg := sizedwaitgroup.New(config.Configuration.ChallengeResolveNumWorkers) + for _, openchallenge := range openchallenges { + logging.Logger.Info("Processing the challenge", zap.Any("challenge_id", openchallenge.ChallengeID), zap.Any("openchallenge", openchallenge)) + err := openchallenge.UnmarshalFields() + if err != nil { + logging.Logger.Error("Error unmarshaling challenge entity.", zap.Error(err)) + continue + } + swg.Add() + go func(redeemCtx context.Context, challengeEntity *ChallengeEntity) { + redeemCtx = datastore.GetStore().CreateTransaction(redeemCtx) + defer redeemCtx.Done() + err := LoadValidationTickets(redeemCtx, challengeEntity) + if err != nil { + logging.Logger.Error("Getting validation tickets failed", zap.Any("challenge_id", challengeEntity.ChallengeID), zap.Error(err)) + } + db := datastore.GetStore().GetTransaction(redeemCtx) + err = db.Commit().Error + if err != nil { + logging.Logger.Error("Error commiting the readmarker redeem", zap.Error(err)) + } + swg.Done() + }(ctx, openchallenge) + } + swg.Wait() + } + db.Rollback() + rctx.Done() +} diff --git a/code/go/0chain.net/blobbercore/challenge/protocol.go b/code/go/0chain.net/blobbercore/challenge/protocol.go index 0f2cc5dc5..26b2b2c59 100644 --- a/code/go/0chain.net/blobbercore/challenge/protocol.go +++ b/code/go/0chain.net/blobbercore/challenge/protocol.go @@ -3,6 +3,8 @@ package challenge import ( "context" "encoding/json" + "fmt" + "math" "math/rand" "time" @@ -67,7 +69,8 @@ func (cr *ChallengeEntity) ErrorChallenge(ctx context.Context, err error) { } } -func (cr *ChallengeEntity) GetValidationTickets(ctx context.Context) error { +// LoadValidationTickets load validation tickets +func (cr *ChallengeEntity) LoadValidationTickets(ctx context.Context) error { if len(cr.Validators) == 0 { cr.StatusMessage = "No validators assigned to the challange" if err := cr.Save(ctx); err != nil { @@ -93,7 +96,6 @@ func (cr *ChallengeEntity) GetValidationTickets(ctx context.Context) error { blockNum := int64(0) if rootRef.NumBlocks > 0 { r := 
rand.New(rand.NewSource(cr.RandomNumber)) - //rand.Seed(cr.RandomNumber) blockNum = r.Int63n(rootRef.NumBlocks) blockNum = blockNum + 1 } else { @@ -139,9 +141,18 @@ func (cr *ChallengeEntity) GetValidationTickets(ctx context.Context) error { inputData.Name = objectPath.Meta["name"].(string) inputData.Path = objectPath.Meta["path"].(string) inputData.Hash = objectPath.Meta["content_hash"].(string) + inputData.ChunkSize = objectPath.ChunkSize + + maxNumBlocks := 1024 + + // the file is too small, some of 1024 blocks is not filled + if objectPath.Size < objectPath.ChunkSize { + merkleChunkSize := objectPath.ChunkSize / 1024 + maxNumBlocks = int(math.Ceil(float64(objectPath.Size) / float64(merkleChunkSize))) + } + r := rand.New(rand.NewSource(cr.RandomNumber)) - //rand.Seed(cr.RandomNumber) - blockoffset := r.Intn(1024) + blockoffset := r.Intn(maxNumBlocks) blockData, mt, err := filestore.GetFileStore().GetFileBlockForChallenge(cr.AllocationID, inputData, blockoffset) if err != nil { @@ -150,6 +161,7 @@ func (cr *ChallengeEntity) GetValidationTickets(ctx context.Context) error { } postData["data"] = []byte(blockData) postData["merkle_path"] = mt.GetPathByIndex(blockoffset) + postData["chunk_size"] = objectPath.ChunkSize } postDataBytes, err := json.Marshal(postData) @@ -169,7 +181,9 @@ func (cr *ChallengeEntity) GetValidationTickets(ctx context.Context) error { continue } } + url := validator.URL + VALIDATOR_URL + fmt.Println(url) resp, err := util.SendPostRequest(url, postDataBytes, nil) if err != nil { Logger.Info("Got error from the validator.", zap.Any("error", err.Error())) @@ -218,7 +232,7 @@ func (cr *ChallengeEntity) GetValidationTickets(ctx context.Context) error { cr.Result = ChallengeSuccess } else { cr.Result = ChallengeFailure - Logger.Error("Challenge failed by the validators", zap.Any("block_num", cr.BlockNum), zap.Any("object_path", objectPath), zap.Any("challenge", cr)) + //Logger.Error("Challenge failed by the validators", zap.Any("block_num", cr.BlockNum), zap.Any("object_path", objectPath), zap.Any("challenge", cr)) } cr.Status = Processed diff --git a/code/go/0chain.net/blobbercore/challenge/worker.go b/code/go/0chain.net/blobbercore/challenge/worker.go index d33f7a66b..abc23e4e7 100644 --- a/code/go/0chain.net/blobbercore/challenge/worker.go +++ b/code/go/0chain.net/blobbercore/challenge/worker.go @@ -1,56 +1,52 @@ package challenge import ( - "bytes" "context" - "encoding/json" - "errors" "time" "github.com/0chain/blobber/code/go/0chain.net/blobbercore/config" "github.com/0chain/blobber/code/go/0chain.net/blobbercore/datastore" - "github.com/0chain/blobber/code/go/0chain.net/core/chain" "github.com/0chain/blobber/code/go/0chain.net/core/lock" - "github.com/0chain/blobber/code/go/0chain.net/core/node" - "github.com/0chain/blobber/code/go/0chain.net/core/transaction" - - "github.com/remeh/sizedwaitgroup" - "gorm.io/gorm" . 
"github.com/0chain/blobber/code/go/0chain.net/core/logging" "go.uber.org/zap" ) -type BCChallengeResponse struct { - BlobberID string `json:"blobber_id"` - Challenges []*ChallengeEntity `json:"challenges"` -} - +// SetupWorkers start challenge workers func SetupWorkers(ctx context.Context) { - go FindChallenges(ctx) + go startSyncChallenges(ctx) + go startProcessChallenges(ctx) go SubmitProcessedChallenges(ctx) //nolint:errcheck // goroutines } -func GetValidationTickets(ctx context.Context, challengeObj *ChallengeEntity) error { +// LoadValidationTickets load validation tickets for challenge +func LoadValidationTickets(ctx context.Context, challengeObj *ChallengeEntity) error { mutex := lock.GetMutex(challengeObj.TableName(), challengeObj.ChallengeID) mutex.Lock() - err := challengeObj.GetValidationTickets(ctx) + + defer func() { + if r := recover(); r != nil { + Logger.Error("[recover] LoadValidationTickets", zap.Any("err", r)) + } + }() + + err := challengeObj.LoadValidationTickets(ctx) if err != nil { Logger.Error("Error getting the validation tickets", zap.Error(err), zap.String("challenge_id", challengeObj.ChallengeID)) } - mutex.Unlock() + return err } -func SubmitProcessedChallenges(ctx context.Context) error { +func SubmitProcessedChallenges(ctx context.Context) { for { select { case <-ctx.Done(): - return ctx.Err() + return default: rctx := datastore.GetStore().CreateTransaction(ctx) db := datastore.GetStore().GetTransaction(rctx) - //lastChallengeRedeemed := &ChallengeEntity{} + rows, err := db.Table("challenges"). Select("commit_txn_id, sequence"). Where(ChallengeEntity{Status: Committed}). @@ -147,107 +143,29 @@ func SubmitProcessedChallenges(ctx context.Context) error { } time.Sleep(time.Duration(config.Configuration.ChallengeResolveFreq) * time.Second) } - - return nil //nolint:govet // need more time to verify } -var iterInprogress = false - -func FindChallenges(ctx context.Context) { +func startProcessChallenges(ctx context.Context) { ticker := time.NewTicker(time.Duration(config.Configuration.ChallengeResolveFreq) * time.Second) for { select { case <-ctx.Done(): return case <-ticker.C: - if !iterInprogress { - iterInprogress = true - rctx := datastore.GetStore().CreateTransaction(ctx) - db := datastore.GetStore().GetTransaction(rctx) - openchallenges := make([]*ChallengeEntity, 0) - db.Where(ChallengeEntity{Status: Accepted}).Find(&openchallenges) - if len(openchallenges) > 0 { - swg := sizedwaitgroup.New(config.Configuration.ChallengeResolveNumWorkers) - for _, openchallenge := range openchallenges { - Logger.Info("Processing the challenge", zap.Any("challenge_id", openchallenge.ChallengeID), zap.Any("openchallenge", openchallenge)) - err := openchallenge.UnmarshalFields() - if err != nil { - Logger.Error("Error unmarshaling challenge entity.", zap.Error(err)) - continue - } - swg.Add() - go func(redeemCtx context.Context, challengeEntity *ChallengeEntity) { - redeemCtx = datastore.GetStore().CreateTransaction(redeemCtx) - defer redeemCtx.Done() - err := GetValidationTickets(redeemCtx, challengeEntity) - if err != nil { - Logger.Error("Getting validation tickets failed", zap.Any("challenge_id", challengeEntity.ChallengeID), zap.Error(err)) - } - db := datastore.GetStore().GetTransaction(redeemCtx) - err = db.Commit().Error - if err != nil { - Logger.Error("Error commiting the readmarker redeem", zap.Error(err)) - } - swg.Done() - }(ctx, openchallenge) - } - swg.Wait() - } - db.Rollback() - rctx.Done() - - params := make(map[string]string) - params["blobber"] = 
node.Self.ID - - var blobberChallenges BCChallengeResponse - blobberChallenges.Challenges = make([]*ChallengeEntity, 0) - retBytes, err := transaction.MakeSCRestAPICall(transaction.STORAGE_CONTRACT_ADDRESS, "/openchallenges", params, chain.GetServerChain()) - - if err != nil { - Logger.Error("Error getting the open challenges from the blockchain", zap.Error(err)) - } else { - tCtx := datastore.GetStore().CreateTransaction(ctx) - db := datastore.GetStore().GetTransaction(tCtx) - bytesReader := bytes.NewBuffer(retBytes) - - d := json.NewDecoder(bytesReader) - d.UseNumber() - errd := d.Decode(&blobberChallenges) + processChallenges(ctx) + } + } +} - if errd != nil { - Logger.Error("Error in unmarshal of the sharder response", zap.Error(errd)) - } else { - for _, v := range blobberChallenges.Challenges { - if v == nil || len(v.ChallengeID) == 0 { - Logger.Info("No challenge entity from the challenge map") - continue - } - - challengeObj := v - _, err := GetChallengeEntity(tCtx, challengeObj.ChallengeID) - - if errors.Is(err, gorm.ErrRecordNotFound) { - latestChallenge, err := GetLastChallengeEntity(tCtx) - if err == nil || errors.Is(err, gorm.ErrRecordNotFound) { - if (latestChallenge == nil && len(challengeObj.PrevChallengeID) == 0) || latestChallenge.ChallengeID == challengeObj.PrevChallengeID { - Logger.Info("Adding new challenge found from blockchain", zap.String("challenge", v.ChallengeID)) - challengeObj.Status = Accepted - if err := challengeObj.Save(tCtx); err != nil { - Logger.Error("ChallengeEntity_Save", zap.String("challenge_id", challengeObj.ChallengeID), zap.Error(err)) - } - } else { - Logger.Error("Challenge chain is not valid") - } - } - //go stats.AddNewChallengeEvent(challengeObj.AllocationID, challengeObj.ID) - } - } - } - db.Commit() - tCtx.Done() - } - iterInprogress = false - } +// startSyncChallenges +func startSyncChallenges(ctx context.Context) { + ticker := time.NewTicker(time.Duration(config.Configuration.ChallengeResolveFreq) * time.Second) + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + syncChallenges(ctx) } } } diff --git a/code/go/0chain.net/blobbercore/config/config.go b/code/go/0chain.net/blobbercore/config/config.go index 332e65110..eae61c8b8 100644 --- a/code/go/0chain.net/blobbercore/config/config.go +++ b/code/go/0chain.net/blobbercore/config/config.go @@ -49,7 +49,13 @@ func SetupConfig(configPath string) { viper.SetEnvKeyReplacer(replacer) viper.AutomaticEnv() viper.SetConfigName("0chain_blobber") - viper.AddConfigPath(configPath) + + if configPath == "" { + viper.AddConfigPath("./config") + } else { + viper.AddConfigPath(configPath) + } + err := viper.ReadInConfig() // Find and read the config file if err != nil { // Handle errors reading the config file panic(fmt.Errorf("fatal error config file: %s", err)) diff --git a/code/go/0chain.net/blobbercore/constants/context_key.go b/code/go/0chain.net/blobbercore/constants/context_key.go deleted file mode 100644 index d7be3edc7..000000000 --- a/code/go/0chain.net/blobbercore/constants/context_key.go +++ /dev/null @@ -1,12 +0,0 @@ -package constants - -import "github.com/0chain/blobber/code/go/0chain.net/core/common" - -const ( - ALLOCATION_CONTEXT_KEY common.ContextKey = "allocation" - CLIENT_CONTEXT_KEY common.ContextKey = "client" - CLIENT_KEY_CONTEXT_KEY common.ContextKey = "client_key" - - // CLIENT_SIGNATURE_HEADER_KEY represents key for context value passed with common.ClientSignatureHeader request header. 
- CLIENT_SIGNATURE_HEADER_KEY common.ContextKey = "signature" -) diff --git a/code/go/0chain.net/blobbercore/convert/convert.go b/code/go/0chain.net/blobbercore/convert/convert.go index 4934027e4..e1855d415 100644 --- a/code/go/0chain.net/blobbercore/convert/convert.go +++ b/code/go/0chain.net/blobbercore/convert/convert.go @@ -427,7 +427,7 @@ func convertDirMetaDataGRPCToDirRef(dirref *blobbergrpc.DirMetaData) *reference. } func WriteFileGRPCToHTTP(req *blobbergrpc.UploadFileRequest) (*http.Request, error) { - var formData allocation.UpdateFileChange + var formData allocation.UpdateFileChanger var uploadMetaString string switch req.Method { case `POST`: diff --git a/code/go/0chain.net/blobbercore/datastore/mock_store.go b/code/go/0chain.net/blobbercore/datastore/mock_store.go index c10457bba..1b0de35eb 100644 --- a/code/go/0chain.net/blobbercore/datastore/mock_store.go +++ b/code/go/0chain.net/blobbercore/datastore/mock_store.go @@ -11,7 +11,6 @@ import ( "gorm.io/gorm" ) - func MocketTheStore(t *testing.T, logging bool) { var err error @@ -27,11 +26,15 @@ func MocketTheStore(t *testing.T, logging bool) { gdb, err := gorm.Open(dialect, new(gorm.Config)) require.NoError(t, err) - setDB(gdb) + //setDB(gdb) + + instance = &postgresStore{ + db: gdb, + } } // sqlmock has problems with inserts, so use mocket for tests with inserts -// https://github.com/DATA-DOG/go-sqlmock/issues/118 + func MockTheStore(t *testing.T) sqlmock.Sqlmock { var db *sql.DB var mock sqlmock.Sqlmock @@ -49,7 +52,9 @@ func MockTheStore(t *testing.T) sqlmock.Sqlmock { gdb, err = gorm.Open(dialector, &gorm.Config{}) require.NoError(t, err) - setDB(gdb) + instance = &postgresStore{ + db: gdb, + } return mock } diff --git a/code/go/0chain.net/blobbercore/datastore/mocket.go b/code/go/0chain.net/blobbercore/datastore/mocket.go new file mode 100644 index 000000000..248695263 --- /dev/null +++ b/code/go/0chain.net/blobbercore/datastore/mocket.go @@ -0,0 +1,88 @@ +package datastore + +import ( + "context" + + . 
"github.com/0chain/blobber/code/go/0chain.net/core/logging" + mocket "github.com/selvatico/go-mocket" + "gorm.io/driver/postgres" + "gorm.io/gorm" + "gorm.io/gorm/logger" +) + +var mocketInstance *Mocket + +// UseMocket use mocket to mock sql driver +func UseMocket(logging bool) { + if mocketInstance == nil { + mocketInstance = &Mocket{} + mocketInstance.logging = logging + err := mocketInstance.Open() + if err != nil { + panic("UseMocket: " + err.Error()) + } + } + + instance = mocketInstance +} + +// Mocket mock sql driver in data-dog/sqlmock +type Mocket struct { + logging bool + db *gorm.DB +} + +func (store *Mocket) Open() error { + + mocket.Catcher.Reset() + mocket.Catcher.Register() + mocket.Catcher.Logging = store.logging + + dialector := postgres.New(postgres.Config{ + DSN: "mockdb", + DriverName: mocket.DriverName, + PreferSimpleProtocol: true, + }) + + cfg := &gorm.Config{} + + if !store.logging { + cfg.Logger = logger.Default.LogMode(logger.Silent) + } + + gdb, err := gorm.Open(dialector, cfg) + if err != nil { + return err + } + + store.db = gdb + + return nil +} + +func (store *Mocket) Close() { + if store.db != nil { + + if db, _ := store.db.DB(); db != nil { + db.Close() + } + } +} + +func (store *Mocket) CreateTransaction(ctx context.Context) context.Context { + db := store.db.Begin() + return context.WithValue(ctx, ContextKeyTransaction, db) +} + +func (store *Mocket) GetTransaction(ctx context.Context) *gorm.DB { + conn := ctx.Value(ContextKeyTransaction) + if conn != nil { + return conn.(*gorm.DB) + } + Logger.Error("No connection in the context.") + return nil +} + +func (store *Mocket) GetDB() *gorm.DB { + return store.db +} diff --git a/code/go/0chain.net/blobbercore/datastore/postgres.go b/code/go/0chain.net/blobbercore/datastore/postgres.go new file mode 100644 index 000000000..98a0db1f0 --- /dev/null +++ b/code/go/0chain.net/blobbercore/datastore/postgres.go @@ -0,0 +1,68 @@ +package datastore + +import ( + "context" + "fmt" + "time" + + "github.com/0chain/blobber/code/go/0chain.net/blobbercore/config" + "github.com/0chain/blobber/code/go/0chain.net/core/common" + . 
"github.com/0chain/blobber/code/go/0chain.net/core/logging" + "gorm.io/driver/postgres" + "gorm.io/gorm" +) + +// postgresStore store implementation for postgres +type postgresStore struct { + db *gorm.DB +} + +func (store *postgresStore) Open() error { + db, err := gorm.Open(postgres.Open(fmt.Sprintf( + "host=%v port=%v user=%v dbname=%v password=%v sslmode=disable", + config.Configuration.DBHost, config.Configuration.DBPort, + config.Configuration.DBUserName, config.Configuration.DBName, + config.Configuration.DBPassword)), &gorm.Config{}) + if err != nil { + return common.NewErrorf("db_open_error", "Error opening the DB connection: %v", err) + } + + sqldb, err := db.DB() + if err != nil { + return common.NewErrorf("db_open_error", "Error opening the DB connection: %v", err) + } + + sqldb.SetMaxIdleConns(100) + sqldb.SetMaxOpenConns(200) + sqldb.SetConnMaxLifetime(30 * time.Second) + // Enable Logger, show detailed log + //db.LogMode(true) + store.db = db + return nil +} + +func (store *postgresStore) Close() { + if store.db != nil { + if sqldb, _ := store.db.DB(); sqldb != nil { + sqldb.Close() + } + } +} + +func (store *postgresStore) CreateTransaction(ctx context.Context) context.Context { + db := store.db.Begin() + return context.WithValue(ctx, ContextKeyTransaction, db) +} + +func (store *postgresStore) GetTransaction(ctx context.Context) *gorm.DB { + conn := ctx.Value(ContextKeyTransaction) + if conn != nil { + return conn.(*gorm.DB) + } + Logger.Error("No connection in the context.") + return nil +} + +func (store *postgresStore) GetDB() *gorm.DB { + return store.db +} diff --git a/code/go/0chain.net/blobbercore/datastore/sqlmock.go b/code/go/0chain.net/blobbercore/datastore/sqlmock.go new file mode 100644 index 000000000..f13fbb365 --- /dev/null +++ b/code/go/0chain.net/blobbercore/datastore/sqlmock.go @@ -0,0 +1,83 @@ +package datastore + +import ( + "context" + + . 
"github.com/0chain/blobber/code/go/0chain.net/core/logging" + "github.com/DATA-DOG/go-sqlmock" + "gorm.io/driver/postgres" + "gorm.io/gorm" +) + +var sqlmockInstance *Sqlmock + +// UseSqlmock use sqlmock to mock sql driver +func UseSqlmock() { + if sqlmockInstance == nil { + sqlmockInstance = &Sqlmock{} + err := sqlmockInstance.Open() + if err != nil { + panic("UseSqlmock: " + err.Error()) + } + } + + instance = sqlmockInstance +} + +// Sqlmock mock sql driver in data-dog/sqlmock +type Sqlmock struct { + db *gorm.DB + Sqlmock sqlmock.Sqlmock +} + +func (store *Sqlmock) Open() error { + + db, mock, err := sqlmock.New() + if err != nil { + return err + } + + var dialector = postgres.New(postgres.Config{ + DSN: "sqlmock_db_0", + DriverName: "postgres", + Conn: db, + PreferSimpleProtocol: true, + }) + var gdb *gorm.DB + gdb, err = gorm.Open(dialector, &gorm.Config{}) + if err != nil { + return err + } + + store.db = gdb + store.Sqlmock = mock + + return nil +} + +func (store *Sqlmock) Close() { + if store.db != nil { + + if db, _ := store.db.DB(); db != nil { + db.Close() + } + } +} + +func (store *Sqlmock) CreateTransaction(ctx context.Context) context.Context { + db := store.db.Begin() + return context.WithValue(ctx, ContextKeyTransaction, db) +} + +func (store *Sqlmock) GetTransaction(ctx context.Context) *gorm.DB { + conn := ctx.Value(ContextKeyTransaction) + if conn != nil { + return conn.(*gorm.DB) + } + Logger.Error("No connection in the context.") + return nil +} + +func (store *Sqlmock) GetDB() *gorm.DB { + return store.db +} diff --git a/code/go/0chain.net/blobbercore/datastore/store.go b/code/go/0chain.net/blobbercore/datastore/store.go index 529a3016a..bfaae071d 100644 --- a/code/go/0chain.net/blobbercore/datastore/store.go +++ b/code/go/0chain.net/blobbercore/datastore/store.go @@ -2,82 +2,45 @@ package datastore import ( "context" - "fmt" - "time" - "github.com/0chain/blobber/code/go/0chain.net/core/common" - - "github.com/0chain/blobber/code/go/0chain.net/blobbercore/config" - "gorm.io/driver/postgres" "gorm.io/gorm" - - . 
"github.com/0chain/blobber/code/go/0chain.net/core/logging" ) type contextKey int -const CONNECTION_CONTEXT_KEY contextKey = iota - -type Store struct { - db *gorm.DB -} +const ( + ContextKeyTransaction contextKey = iota + ContextKeyStore +) -var store Store +type Store interface { -func setDB(db *gorm.DB) { - store.db = db -} + // GetDB get raw gorm db + GetDB() *gorm.DB + // CreateTransaction create transaction, and save it in context + CreateTransaction(ctx context.Context) context.Context + // GetTransaction get transaction from context + GetTransaction(ctx context.Context) *gorm.DB -func GetStore() *Store { - return &store + Open() error + Close() } -func (store *Store) Open() error { - db, err := gorm.Open(postgres.Open(fmt.Sprintf( - "host=%v port=%v user=%v dbname=%v password=%v sslmode=disable", - config.Configuration.DBHost, config.Configuration.DBPort, - config.Configuration.DBUserName, config.Configuration.DBName, - config.Configuration.DBPassword)), &gorm.Config{}) - if err != nil { - return common.NewErrorf("db_open_error", "Error opening the DB connection: %v", err) - } +var instance Store - sqldb, err := db.DB() - if err != nil { - return common.NewErrorf("db_open_error", "Error opening the DB connection: %v", err) - } - - sqldb.SetMaxIdleConns(100) - sqldb.SetMaxOpenConns(200) - sqldb.SetConnMaxLifetime(30 * time.Second) - // Enable Logger, show detailed log - //db.LogMode(true) - store.db = db - return nil -} - -func (store *Store) Close() { - if store.db != nil { - if sqldb, _ := store.db.DB(); sqldb != nil { - sqldb.Close() - } - } +func init() { + instance = &postgresStore{} } -func (store *Store) CreateTransaction(ctx context.Context) context.Context { - db := store.db.Begin() - return context.WithValue(ctx, CONNECTION_CONTEXT_KEY, db) //nolint:staticcheck // changing type might require further refactor +func GetStore() Store { + return instance } -func (store *Store) GetTransaction(ctx context.Context) *gorm.DB { - conn := ctx.Value(CONNECTION_CONTEXT_KEY) - if conn != nil { - return conn.(*gorm.DB) +func FromContext(ctx context.Context) Store { + store := ctx.Value(ContextKeyStore) + if store != nil { + return store.(Store) } - Logger.Error("No connection in the context.") - return nil -} -func (store *Store) GetDB() *gorm.DB { - return store.db + return GetStore() } diff --git a/code/go/0chain.net/blobbercore/filestore/fs_store.go b/code/go/0chain.net/blobbercore/filestore/fs_store.go index 728b703f2..f17350927 100644 --- a/code/go/0chain.net/blobbercore/filestore/fs_store.go +++ b/code/go/0chain.net/blobbercore/filestore/fs_store.go @@ -9,13 +9,14 @@ import ( "fmt" "hash" "io" - "io/ioutil" + "math" "mime/multipart" "os" "path/filepath" "strings" . "github.com/0chain/blobber/code/go/0chain.net/core/logging" + "github.com/0chain/errors" "go.uber.org/zap" "github.com/0chain/blobber/code/go/0chain.net/core/common" @@ -23,9 +24,11 @@ import ( "github.com/0chain/blobber/code/go/0chain.net/blobbercore/config" - "github.com/0chain/blobber/code/go/0chain.net/core/util" + "github.com/0chain/gosdk/constants" "github.com/minio/minio-go" "golang.org/x/crypto/sha3" + + "github.com/0chain/gosdk/core/util" ) const ( @@ -82,18 +85,18 @@ func (FileBlockGetter) GetFileBlock(fs *FileFSStore, allocationID string, fileDa return nil, err } - filesize := int(fileinfo.Size()) - maxBlockNum := int64(filesize / CHUNK_SIZE) + filesize := fileinfo.Size() + maxBlockNum := filesize / fileData.ChunkSize // check for any left over bytes. Add one more go routine if required. 
- if remainder := filesize % CHUNK_SIZE; remainder != 0 { + if remainder := filesize % fileData.ChunkSize; remainder != 0 { maxBlockNum++ } if blockNum > maxBlockNum || blockNum < 1 { return nil, common.NewError("invalid_block_number", "Invalid block number") } - buffer := make([]byte, CHUNK_SIZE*numBlocks) - n, err := file.ReadAt(buffer, ((blockNum - 1) * CHUNK_SIZE)) + buffer := make([]byte, int64(fileData.ChunkSize)*numBlocks) + n, err := file.ReadAt(buffer, ((blockNum - 1) * int64(fileData.ChunkSize))) if err != nil && err != io.EOF { return nil, err } @@ -114,6 +117,16 @@ type StoreAllocation struct { TempObjectsPath string } +var fileFSStore *FileFSStore + +func UseDisk() { + if fileFSStore == nil { + panic("UseDisk: please SetupFSStore first") + } + + fileStore = fileFSStore +} + func SetupFSStore(rootDir string) (FileStore, error) { if err := createDirs(rootDir); err != nil { return nil, err @@ -122,12 +135,15 @@ func SetupFSStore(rootDir string) (FileStore, error) { } func SetupFSStoreI(rootDir string, fileBlockGetter IFileBlockGetter) (FileStore, error) { - fsStore = &FileFSStore{ + fileFSStore = &FileFSStore{ RootDirectory: rootDir, Minio: intializeMinio(), fileBlockGetter: fileBlockGetter, } - return fsStore, nil + + fileStore = fileFSStore + + return fileStore, nil } func intializeMinio() *minio.Client { @@ -300,49 +316,51 @@ func (fs *FileFSStore) GetFileBlockForChallenge(allocationID string, fileData *F var returnBytes []byte - merkleHashes := make([]hash.Hash, 1024) - merkleLeaves := make([]util.Hashable, 1024) - for idx := range merkleHashes { - merkleHashes[idx] = sha3.New256() - } + fi, _ := file.Stat() + + numChunks := int(math.Ceil(float64(fi.Size()) / float64(fileData.ChunkSize))) + + fmt := util.NewFixedMerkleTree(int(fileData.ChunkSize)) + bytesBuf := bytes.NewBuffer(make([]byte, 0)) - for { - _, err := io.CopyN(bytesBuf, file, CHUNK_SIZE) - if err != io.EOF && err != nil { - return nil, nil, common.NewError("file_write_error", err.Error()) - } - dataBytes := bytesBuf.Bytes() - tmpBytes := make([]byte, len(dataBytes)) - copy(tmpBytes, dataBytes) - merkleChunkSize := 64 - for i := 0; i < len(tmpBytes); i += merkleChunkSize { - end := i + merkleChunkSize - if end > len(tmpBytes) { - end = len(tmpBytes) + for chunkIndex := 0; chunkIndex < numChunks; chunkIndex++ { + written, err := io.CopyN(bytesBuf, file, fileData.ChunkSize) + + if written > 0 { + dataBytes := bytesBuf.Bytes() + + err2 := fmt.Write(dataBytes, chunkIndex) + if err2 != nil { + return nil, nil, errors.ThrowLog(err2.Error(), constants.ErrUnableHash) } - offset := i / merkleChunkSize - merkleHashes[offset].Write(tmpBytes[i:end]) - if offset == blockoffset { - returnBytes = append(returnBytes, tmpBytes[i:end]...) + + merkleChunkSize := int(fileData.ChunkSize / 1024) + for i := 0; i < len(dataBytes); i += merkleChunkSize { + end := i + merkleChunkSize + if end > len(dataBytes) { + end = len(dataBytes) + } + offset := i / merkleChunkSize + + if offset == blockoffset { + returnBytes = append(returnBytes, dataBytes[i:end]...) 
+ } } + bytesBuf.Reset() } - bytesBuf.Reset() + if err != nil && err == io.EOF { break } } - for idx := range merkleHashes { - merkleLeaves[idx] = util.NewStringHashable(hex.EncodeToString(merkleHashes[idx].Sum(nil))) - } - var mt util.MerkleTreeI = &util.MerkleTree{} - mt.ComputeTree(merkleLeaves) - - return returnBytes, mt, nil + return returnBytes, fmt.GetMerkleTree(), nil } func (fs *FileFSStore) GetFileBlock(allocationID string, fileData *FileInputData, blockNum int64, numBlocks int64) ([]byte, error) { + return fs.fileBlockGetter.GetFileBlock(fs, allocationID, fileData, blockNum, numBlocks) + } func (fs *FileFSStore) DeleteTempFile(allocationID string, fileData *FileInputData, connectionID string) error { @@ -426,72 +444,6 @@ func (fs *FileFSStore) DeleteFile(allocationID string, contentHash string) error return os.Remove(fileObjectPath) } -func (fs *FileFSStore) GetMerkleTreeForFile(allocationID string, fileData *FileInputData) (util.MerkleTreeI, error) { - allocation, err := fs.SetupAllocation(allocationID, true) - if err != nil { - return nil, common.NewError("filestore_setup_error", "Error setting the fs store. "+err.Error()) - } - dirPath, destFile := GetFilePathFromHash(fileData.Hash) - fileObjectPath := filepath.Join(allocation.ObjectsPath, dirPath) - fileObjectPath = filepath.Join(fileObjectPath, destFile) - - file, err := os.Open(fileObjectPath) - if err != nil { - if os.IsNotExist(err) && fileData.OnCloud { - err = fs.DownloadFromCloud(fileData.Hash, fileObjectPath) - if err != nil { - return nil, common.NewError("minio_download_failed", "Unable to download from minio with err "+err.Error()) - } - file, err = os.Open(fileObjectPath) - if err != nil { - return nil, err - } - } else { - return nil, err - } - } - defer file.Close() - //merkleHash := sha3.New256() - tReader := file //io.TeeReader(file, merkleHash) - //merkleLeaves := make([]util.Hashable, 0) - merkleHashes := make([]hash.Hash, 1024) - merkleLeaves := make([]util.Hashable, 1024) - for idx := range merkleHashes { - merkleHashes[idx] = sha3.New256() - } - bytesBuf := bytes.NewBuffer(make([]byte, 0)) - for { - _, err := io.CopyN(bytesBuf, tReader, CHUNK_SIZE) - if err != io.EOF && err != nil { - return nil, common.NewError("file_write_error", err.Error()) - } - dataBytes := bytesBuf.Bytes() - merkleChunkSize := 64 - for i := 0; i < len(dataBytes); i += merkleChunkSize { - end := i + merkleChunkSize - if end > len(dataBytes) { - end = len(dataBytes) - } - offset := i / merkleChunkSize - merkleHashes[offset].Write(dataBytes[i:end]) - } - //merkleLeaves = append(merkleLeaves, util.NewStringHashable(hex.EncodeToString(merkleHash.Sum(nil)))) - //merkleHash.Reset() - bytesBuf.Reset() - if err != nil && err == io.EOF { - break - } - } - for idx := range merkleHashes { - merkleLeaves[idx] = util.NewStringHashable(hex.EncodeToString(merkleHashes[idx].Sum(nil))) - } - - var mt util.MerkleTreeI = &util.MerkleTree{} - mt.ComputeTree(merkleLeaves) - - return mt, nil -} - func (fs *FileFSStore) CreateDir(dirName string) error { return createDirs(dirName) } @@ -503,48 +455,28 @@ func (fs *FileFSStore) DeleteDir(allocationID, dirPath, connectionID string) err func (fs *FileFSStore) WriteFile(allocationID string, fileData *FileInputData, infile multipart.File, connectionID string) (*FileOutputData, error) { + if fileData.IsChunked { + return fs.WriteChunk(allocationID, fileData, infile, connectionID) + } + allocation, err := fs.SetupAllocation(allocationID, false) if err != nil { return nil, common.NewError("filestore_setup_error", 
"Error setting the fs store. "+err.Error()) } tempFilePath := fs.generateTempPath(allocation, fileData, connectionID) - dest, err := NewChunkWriter(tempFilePath) + dest, err := os.Create(tempFilePath) if err != nil { return nil, common.NewError("file_creation_error", err.Error()) } defer dest.Close() fileRef := &FileOutputData{} - var fileReader io.Reader = infile - - if fileData.IsResumable { - h := sha1.New() - offset, err := dest.WriteChunk(context.TODO(), fileData.UploadOffset, io.TeeReader(fileReader, h)) - - if err != nil { - return nil, common.NewError("file_write_error", err.Error()) - } - - fileRef.ContentHash = hex.EncodeToString(h.Sum(nil)) - fileRef.Size = dest.Size() - fileRef.Name = fileData.Name - fileRef.Path = fileData.Path - fileRef.UploadOffset = fileData.UploadOffset + offset - fileRef.UploadLength = fileData.UploadLength - - if !fileData.IsFinal { - //skip to compute hash until the last chunk is uploaded - return fileRef, nil - } - - fileReader = dest - } h := sha1.New() bytesBuffer := bytes.NewBuffer(nil) multiHashWriter := io.MultiWriter(h, bytesBuffer) - tReader := io.TeeReader(fileReader, multiHashWriter) + tReader := io.TeeReader(infile, multiHashWriter) merkleHashes := make([]hash.Hash, 1024) merkleLeaves := make([]util.Hashable, 1024) for idx := range merkleHashes { @@ -552,14 +484,8 @@ func (fs *FileFSStore) WriteFile(allocationID string, fileData *FileInputData, } fileSize := int64(0) for { - var written int64 - if fileData.IsResumable { - //all chunks have been written, only read bytes from local file , and compute hash - written, err = io.CopyN(ioutil.Discard, tReader, CHUNK_SIZE) - } else { - written, err = io.CopyN(dest, tReader, CHUNK_SIZE) - } + written, err := io.CopyN(dest, tReader, CHUNK_SIZE) if err != io.EOF && err != nil { return nil, common.NewError("file_write_error", err.Error()) @@ -588,17 +514,50 @@ func (fs *FileFSStore) WriteFile(allocationID string, fileData *FileInputData, var mt util.MerkleTreeI = &util.MerkleTree{} mt.ComputeTree(merkleLeaves) - //only update hash for whole file when it is not a resumable upload or is final chunk. - if !fileData.IsResumable || fileData.IsFinal { - fileRef.ContentHash = hex.EncodeToString(h.Sum(nil)) - } - + fileRef.ContentHash = hex.EncodeToString(h.Sum(nil)) fileRef.Size = fileSize fileRef.Name = fileData.Name fileRef.Path = fileData.Path fileRef.MerkleRoot = mt.GetRoot() - fileRef.UploadOffset = fileSize - fileRef.UploadLength = fileData.UploadLength + + return fileRef, nil +} + +// WriteChunk append chunk to temp file +func (fs *FileFSStore) WriteChunk(allocationID string, fileData *FileInputData, + infile multipart.File, connectionID string) (*FileOutputData, error) { + + allocation, err := fs.SetupAllocation(allocationID, false) + if err != nil { + return nil, common.NewError("filestore_setup_error", "Error setting the fs store. "+err.Error()) + } + + tempFilePath := fs.generateTempPath(allocation, fileData, connectionID) + dest, err := NewChunkWriter(tempFilePath) + if err != nil { + return nil, common.NewError("file_creation_error", err.Error()) + } + defer dest.Close() + + fileRef := &FileOutputData{} + + // the chunk has been rewitten. 
but it is lost when network is broken, and it is not save in db + if dest.size > fileData.UploadOffset { + fileRef.ChunkUploaded = true + } + + h := sha1.New() + size, err := dest.WriteChunk(context.TODO(), fileData.UploadOffset, io.TeeReader(infile, h)) + + if err != nil { + return nil, errors.ThrowLog(err.Error(), constants.ErrUnableWriteFile) + } + + fileRef.Size = size + fileRef.ContentHash = hex.EncodeToString(h.Sum(nil)) + + fileRef.Name = fileData.Name + fileRef.Path = fileData.Path return fileRef, nil } diff --git a/code/go/0chain.net/blobbercore/filestore/mock_store.go b/code/go/0chain.net/blobbercore/filestore/mock_store.go new file mode 100644 index 000000000..fcda9fc75 --- /dev/null +++ b/code/go/0chain.net/blobbercore/filestore/mock_store.go @@ -0,0 +1,101 @@ +package filestore + +import ( + "crypto/sha1" + "encoding/hex" + "encoding/json" + "io" + "mime/multipart" + + "github.com/0chain/gosdk/constants" + "github.com/0chain/gosdk/core/util" +) + +type MockStore struct { +} + +var mockStore *MockStore + +func UseMock() { + if mockStore == nil { + mockStore = &MockStore{} + + } + + fileStore = mockStore +} + +// WriteFile write chunk file into disk +func (ms *MockStore) WriteFile(allocationID string, fileData *FileInputData, infile multipart.File, connectionID string) (*FileOutputData, error) { + fileRef := &FileOutputData{} + + fileRef.ChunkUploaded = true + + h := sha1.New() + reader := io.TeeReader(infile, h) + fileSize := int64(0) + for { + + written, err := io.CopyN(io.Discard, reader, fileData.ChunkSize) + + fileSize += written + + if err != nil { + break + } + } + + fileRef.Size = fileSize + fileRef.ContentHash = hex.EncodeToString(h.Sum(nil)) + + fileRef.Name = fileData.Name + fileRef.Path = fileData.Path + + return fileRef, nil +} +func (ms *MockStore) DeleteTempFile(allocationID string, fileData *FileInputData, connectionID string) error { + return nil +} + +func (ms *MockStore) CreateDir(dirName string) error { + return nil +} +func (ms *MockStore) DeleteDir(allocationID, dirPath, connectionID string) error { + return nil +} + +func (ms *MockStore) GetFileBlock(allocationID string, fileData *FileInputData, blockNum int64, numBlocks int64) ([]byte, error) { + return nil, constants.ErrNotImplemented +} + +func (ms *MockStore) CommitWrite(allocationID string, fileData *FileInputData, connectionID string) (bool, error) { + return true, nil +} + +func (ms *MockStore) GetFileBlockForChallenge(allocationID string, fileData *FileInputData, blockoffset int) (json.RawMessage, util.MerkleTreeI, error) { + return nil, nil, constants.ErrNotImplemented +} +func (ms *MockStore) DeleteFile(allocationID string, contentHash string) error { + return nil +} +func (ms *MockStore) GetTotalDiskSizeUsed() (int64, error) { + return 0, constants.ErrNotImplemented +} +func (ms *MockStore) GetlDiskSizeUsed(allocationID string) (int64, error) { + return 0, constants.ErrNotImplemented +} +func (ms *MockStore) GetTempPathSize(allocationID string) (int64, error) { + return 0, constants.ErrNotImplemented +} +func (ms *MockStore) IterateObjects(allocationID string, handler FileObjectHandler) error { + return nil +} +func (ms *MockStore) UploadToCloud(fileHash, filePath string) error { + return nil +} +func (ms *MockStore) DownloadFromCloud(fileHash, filePath string) error { + return nil +} +func (ms *MockStore) SetupAllocation(allocationID string, skipCreate bool) (*StoreAllocation, error) { + return nil, constants.ErrNotImplemented +} diff --git a/code/go/0chain.net/blobbercore/filestore/store.go 
b/code/go/0chain.net/blobbercore/filestore/store.go index 565b27d15..305ad2165 100644 --- a/code/go/0chain.net/blobbercore/filestore/store.go +++ b/code/go/0chain.net/blobbercore/filestore/store.go @@ -4,7 +4,7 @@ import ( "encoding/json" "mime/multipart" - "github.com/0chain/blobber/code/go/0chain.net/core/util" + "github.com/0chain/gosdk/core/util" ) const CHUNK_SIZE = 64 * 1024 @@ -15,8 +15,10 @@ type FileInputData struct { Hash string OnCloud bool - //IsResumable the request is resumable upload - IsResumable bool + // ChunkSize chunk size + ChunkSize int64 + //IsChunked the request is chunked upload + IsChunked bool //UploadLength indicates the size of the entire upload in bytes. The value MUST be a non-negative integer. UploadLength int64 //Upload-Offset indicates a byte offset within a resource. The value MUST be a non-negative integer. @@ -30,24 +32,26 @@ type FileOutputData struct { Path string MerkleRoot string ContentHash string - Size int64 - - //UploadLength indicates the size of the entire upload in bytes. The value MUST be a non-negative integer. - UploadLength int64 - //Upload-Offset indicates a byte offset within a resource. The value MUST be a non-negative integer. - UploadOffset int64 + // Size wirtten size/chunk size + Size int64 + // ChunkUploaded the chunk is uploaded or not. + ChunkUploaded bool } type FileObjectHandler func(contentHash string, contentSize int64) type FileStore interface { + // WriteFile write chunk file into disk WriteFile(allocationID string, fileData *FileInputData, infile multipart.File, connectionID string) (*FileOutputData, error) DeleteTempFile(allocationID string, fileData *FileInputData, connectionID string) error + CreateDir(dirName string) error DeleteDir(allocationID, dirPath, connectionID string) error + GetFileBlock(allocationID string, fileData *FileInputData, blockNum int64, numBlocks int64) ([]byte, error) + CommitWrite(allocationID string, fileData *FileInputData, connectionID string) (bool, error) - //GetMerkleTreeForFile(allocationID string, fileData *FileInputData) (util.MerkleTreeI, error) + GetFileBlockForChallenge(allocationID string, fileData *FileInputData, blockoffset int) (json.RawMessage, util.MerkleTreeI, error) DeleteFile(allocationID string, contentHash string) error GetTotalDiskSizeUsed() (int64, error) @@ -59,8 +63,8 @@ type FileStore interface { SetupAllocation(allocationID string, skipCreate bool) (*StoreAllocation, error) } -var fsStore FileStore +var fileStore FileStore func GetFileStore() FileStore { - return fsStore + return fileStore } diff --git a/code/go/0chain.net/blobbercore/handler/commit_integration_test.go b/code/go/0chain.net/blobbercore/handler/commit_integration_test.go index 855fd0cbb..479f6f6be 100644 --- a/code/go/0chain.net/blobbercore/handler/commit_integration_test.go +++ b/code/go/0chain.net/blobbercore/handler/commit_integration_test.go @@ -4,11 +4,12 @@ import ( "context" "encoding/hex" "encoding/json" - blobbergrpc "github.com/0chain/blobber/code/go/0chain.net/blobbercore/blobbergrpc/proto" "strconv" "testing" "time" + blobbergrpc "github.com/0chain/blobber/code/go/0chain.net/blobbercore/blobbergrpc/proto" + "github.com/0chain/blobber/code/go/0chain.net/blobbercore/reference" "github.com/0chain/blobber/code/go/0chain.net/blobbercore/writemarker" "github.com/0chain/blobber/code/go/0chain.net/core/common" @@ -37,6 +38,7 @@ func TestBlobberGRPCService_Commit(t *testing.T) { ContentHash: "contentHash", MerkleRoot: "merkleRoot", ActualFileHash: "actualFileHash", + ChunkSize: 65536, } rootRefHash 
:= encryption.Hash(encryption.Hash(fr.GetFileHashData())) diff --git a/code/go/0chain.net/blobbercore/handler/file_command.go b/code/go/0chain.net/blobbercore/handler/file_command.go new file mode 100644 index 000000000..9601b5248 --- /dev/null +++ b/code/go/0chain.net/blobbercore/handler/file_command.go @@ -0,0 +1,39 @@ +package handler + +import ( + "context" + "net/http" + + "github.com/0chain/blobber/code/go/0chain.net/blobbercore/allocation" + "github.com/0chain/blobber/code/go/0chain.net/blobbercore/blobberhttp" +) + +// FileCommand execute command for a file operation +type FileCommand interface { + // IsAuthorized validate request, and try build ChangeProcesser instance + IsAuthorized(ctx context.Context, req *http.Request, allocationObj *allocation.Allocation, clientID string) error + + // ProcessContent flush file to FileStorage + ProcessContent(ctx context.Context, req *http.Request, allocationObj *allocation.Allocation, connectionObj *allocation.AllocationChangeCollector) (blobberhttp.UploadResult, error) + + // ProcessThumbnail flush thumbnail file to FileStorage if it has. + ProcessThumbnail(ctx context.Context, req *http.Request, allocationObj *allocation.Allocation, connectionObj *allocation.AllocationChangeCollector) error + + // UpdateChange update AllocationChangeProcessor. It will be president in db for commiting transcation + UpdateChange(ctx context.Context, connectionObj *allocation.AllocationChangeCollector) error +} + +// createFileCommand create file command for UPLOAD,UPDATE and DELETE +func createFileCommand(req *http.Request) FileCommand { + switch req.Method { + case http.MethodPost: + return &AddFileCommand{} + case http.MethodPut: + return &UpdateFileCommand{} + case http.MethodDelete: + return &FileCommandDelete{} + + default: + return &AddFileCommand{} + } +} diff --git a/code/go/0chain.net/blobbercore/handler/file_command_add.go b/code/go/0chain.net/blobbercore/handler/file_command_add.go new file mode 100644 index 000000000..edcf57aa0 --- /dev/null +++ b/code/go/0chain.net/blobbercore/handler/file_command_add.go @@ -0,0 +1,191 @@ +package handler + +import ( + "context" + "encoding/json" + "net/http" + + "github.com/0chain/blobber/code/go/0chain.net/blobbercore/allocation" + "github.com/0chain/blobber/code/go/0chain.net/blobbercore/blobberhttp" + "github.com/0chain/blobber/code/go/0chain.net/blobbercore/config" + "github.com/0chain/blobber/code/go/0chain.net/blobbercore/filestore" + "github.com/0chain/blobber/code/go/0chain.net/blobbercore/reference" + "github.com/0chain/blobber/code/go/0chain.net/core/common" + "github.com/0chain/blobber/code/go/0chain.net/core/logging" + "github.com/0chain/gosdk/constants" + "github.com/0chain/gosdk/zboxcore/fileref" + "go.uber.org/zap" +) + +// AddFileCommand command for resuming file +type AddFileCommand struct { + allocationChange *allocation.AllocationChange + fileChanger *allocation.AddFileChanger +} + +// IsAuthorized validate request. 
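[Editorial note: the FileCommand interface introduced above splits a write request into four phases that the HTTP handler drives in order. A hypothetical driver, sketched here only to show the intended call sequence; handleWrite is not a function in this patch, and the real handler in this package may differ in error handling and transaction management:

// handleWrite is a hypothetical sketch assumed to live in package handler.
func handleWrite(ctx context.Context, req *http.Request,
	allocationObj *allocation.Allocation,
	connectionObj *allocation.AllocationChangeCollector,
	clientID string) (*blobberhttp.UploadResult, error) {

	cmd := createFileCommand(req) // POST -> add, PUT -> update, DELETE -> delete

	if err := cmd.IsAuthorized(ctx, req, allocationObj, clientID); err != nil {
		return nil, err
	}

	result, err := cmd.ProcessContent(ctx, req, allocationObj, connectionObj)
	if err != nil {
		return nil, err
	}

	if err := cmd.ProcessThumbnail(ctx, req, allocationObj, connectionObj); err != nil {
		return nil, err
	}

	// record the AllocationChange so it can be applied when the connection is committed
	if err := cmd.UpdateChange(ctx, connectionObj); err != nil {
		return nil, err
	}

	return &result, nil
}

End of note.]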
+func (cmd *AddFileCommand) IsAuthorized(ctx context.Context, req *http.Request, allocationObj *allocation.Allocation, clientID string) error { + if allocationObj.OwnerID != clientID && allocationObj.RepairerID != clientID { + return common.NewError("invalid_operation", "Operation needs to be performed by the owner or the payer of the allocation") + } + + fileChanger := &allocation.AddFileChanger{} + + uploadMetaString := req.FormValue("uploadMeta") + err := json.Unmarshal([]byte(uploadMetaString), fileChanger) + if err != nil { + return common.NewError("invalid_parameters", + "Invalid parameters. Error parsing the meta data for upload."+err.Error()) + } + exisitingFileRef, _ := reference.GetReference(ctx, allocationObj.ID, fileChanger.Path) + + if exisitingFileRef != nil { + return common.NewError("duplicate_file", "File at path already exists") + } + + //create a FixedMerkleTree instance first, it will be reloaded from db in cmd.reloadChange if it is not first chunk + //cmd.fileChanger.FixedMerkleTree = &util.FixedMerkleTree{} + + if fileChanger.ChunkSize <= 0 { + fileChanger.ChunkSize = fileref.CHUNK_SIZE + } + + cmd.fileChanger = fileChanger + + return nil + +} + +// ProcessContent flush file to FileStorage +func (cmd *AddFileCommand) ProcessContent(ctx context.Context, req *http.Request, allocationObj *allocation.Allocation, connectionObj *allocation.AllocationChangeCollector) (blobberhttp.UploadResult, error) { + result := blobberhttp.UploadResult{} + + origfile, _, err := req.FormFile("uploadFile") + if err != nil { + return result, common.NewError("invalid_parameters", "Error Reading multi parts for file."+err.Error()) + } + defer origfile.Close() + + cmd.reloadChange(connectionObj) + + fileInputData := &filestore.FileInputData{ + Name: cmd.fileChanger.Filename, + Path: cmd.fileChanger.Path, + OnCloud: false, + + ChunkSize: cmd.fileChanger.ChunkSize, + UploadOffset: cmd.fileChanger.UploadOffset, + IsChunked: true, + IsFinal: cmd.fileChanger.IsFinal, + } + fileOutputData, err := filestore.GetFileStore().WriteFile(allocationObj.ID, fileInputData, origfile, connectionObj.ConnectionID) + if err != nil { + return result, common.NewError("upload_error", "Failed to upload the file. "+err.Error()) + } + + result.Filename = cmd.fileChanger.Filename + result.Hash = fileOutputData.ContentHash + //result.MerkleRoot = fileOutputData.MerkleRoot + result.Size = fileOutputData.Size + + allocationSize := connectionObj.Size + + // only update connection size when the chunk is uploaded by first time. 
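[Editorial note: because a chunk can be re-sent after a broken connection, WriteChunk reports ChunkUploaded=true when bytes already exist at the requested offset, and only first-time chunks are added to the pending connection size, as the check that follows shows. A small standalone illustration of that bookkeeping, with names invented for the example:

package main

import "fmt"

// chunk is a simplified view of one uploaded fragment.
type chunk struct {
	size       int64
	reuploaded bool // true when the same offset was written before, e.g. a retry
}

// pendingSize accumulates only chunks seen for the first time, so a retried
// chunk does not inflate the size charged against the allocation.
func pendingSize(chunks []chunk) int64 {
	var total int64
	for _, c := range chunks {
		if !c.reuploaded {
			total += c.size
		}
	}
	return total
}

func main() {
	chunks := []chunk{{65536, false}, {65536, false}, {65536, true}} // third is a retry
	fmt.Println(pendingSize(chunks)) // 131072, the retry is not double counted
}

End of note.]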
+ if !fileOutputData.ChunkUploaded { + allocationSize += fileOutputData.Size + } + + if allocationSize > config.Configuration.MaxFileSize { + return result, common.NewError("file_size_limit_exceeded", "Size for the given file is larger than the max limit") + } + + if allocationObj.BlobberSizeUsed+allocationSize > allocationObj.BlobberSize { + return result, common.NewError("max_allocation_size", "Max size reached for the allocation with this blobber") + } + + if len(cmd.fileChanger.ChunkHash) > 0 && cmd.fileChanger.ChunkHash != fileOutputData.ContentHash { + return result, common.NewError("content_hash_mismatch", "Content hash provided in the meta data does not match the file content") + } + + // Save client's ContentHash in database instead blobber's + // it saves time to read and compute hash of fragment from disk again + //cmd.fileChanger.Hash = fileOutputData.ContentHash + + cmd.fileChanger.AllocationID = allocationObj.ID + cmd.fileChanger.Size = allocationSize + + cmd.allocationChange = &allocation.AllocationChange{} + cmd.allocationChange.ConnectionID = connectionObj.ConnectionID + cmd.allocationChange.Size = allocationSize + cmd.allocationChange.Operation = constants.FileOperationInsert + + connectionObj.Size = allocationSize + + return result, nil +} + +// ProcessThumbnail flush thumbnail file to FileStorage if it has. +func (cmd *AddFileCommand) ProcessThumbnail(ctx context.Context, req *http.Request, allocationObj *allocation.Allocation, connectionObj *allocation.AllocationChangeCollector) error { + + thumbfile, thumbHeader, _ := req.FormFile("uploadThumbnailFile") + + if thumbHeader != nil { + + defer thumbfile.Close() + + thumbInputData := &filestore.FileInputData{Name: thumbHeader.Filename, Path: cmd.fileChanger.Path} + thumbOutputData, err := filestore.GetFileStore().WriteFile(allocationObj.ID, thumbInputData, thumbfile, connectionObj.ConnectionID) + if err != nil { + return common.NewError("upload_error", "Failed to upload the thumbnail. 
"+err.Error()) + } + if cmd.fileChanger.ThumbnailHash != thumbOutputData.ContentHash { + return common.NewError("content_hash_mismatch", "Content hash provided in the meta data does not match the thumbnail content") + } + + cmd.fileChanger.ThumbnailHash = thumbOutputData.ContentHash + cmd.fileChanger.ThumbnailSize = thumbOutputData.Size + cmd.fileChanger.ThumbnailFilename = thumbInputData.Name + } + + return nil +} + +func (cmd *AddFileCommand) reloadChange(connectionObj *allocation.AllocationChangeCollector) { + for _, c := range connectionObj.Changes { + if c.Operation == constants.FileOperationInsert { + + dbChangeProcessor := &allocation.AddFileChanger{} + + err := dbChangeProcessor.Unmarshal(c.Input) + if err != nil { + logging.Logger.Error("reloadChange", zap.Error(err)) + } + + cmd.fileChanger.Size = dbChangeProcessor.Size + return + } + } +} + +// UpdateChange replace AddFileChange in db +func (cmd *AddFileCommand) UpdateChange(ctx context.Context, connectionObj *allocation.AllocationChangeCollector) error { + for _, c := range connectionObj.Changes { + if c.Operation == constants.FileOperationInsert { + c.Size = connectionObj.Size + c.Input, _ = cmd.fileChanger.Marshal() + + //c.ModelWithTS.UpdatedAt = time.Now() + err := connectionObj.Save(ctx) + if err != nil { + return err + } + + return c.Save(ctx) + } + } + + //NOT FOUND + connectionObj.AddChange(cmd.allocationChange, cmd.fileChanger) + + return connectionObj.Save(ctx) +} diff --git a/code/go/0chain.net/blobbercore/handler/file_command_delete.go b/code/go/0chain.net/blobbercore/handler/file_command_delete.go new file mode 100644 index 000000000..f33ae7014 --- /dev/null +++ b/code/go/0chain.net/blobbercore/handler/file_command_delete.go @@ -0,0 +1,77 @@ +package handler + +import ( + "context" + "net/http" + + "github.com/0chain/blobber/code/go/0chain.net/blobbercore/allocation" + "github.com/0chain/blobber/code/go/0chain.net/blobbercore/blobberhttp" + "github.com/0chain/blobber/code/go/0chain.net/blobbercore/reference" + "github.com/0chain/blobber/code/go/0chain.net/core/common" + "github.com/0chain/gosdk/constants" +) + +// FileCommandDelete command for deleting file +type FileCommandDelete struct { + exisitingFileRef *reference.Ref + changeProcessor *allocation.DeleteFileChange + allocationChange *allocation.AllocationChange +} + +// IsAuthorized validate request. 
+func (cmd *FileCommandDelete) IsAuthorized(ctx context.Context, req *http.Request, allocationObj *allocation.Allocation, clientID string) error { + if allocationObj.OwnerID != clientID && allocationObj.RepairerID != clientID { + return common.NewError("invalid_operation", "Operation needs to be performed by the owner or the payer of the allocation") + } + + path := req.FormValue("path") + if len(path) == 0 { + return common.NewError("invalid_parameters", "Invalid path") + } + cmd.exisitingFileRef, _ = reference.GetReference(ctx, allocationObj.ID, path) + + if cmd.exisitingFileRef == nil { + return common.NewError("invalid_file", "File does not exist at path") + } + + return nil +} + +// UpdateChange add DeleteFileChange in db +func (cmd *FileCommandDelete) UpdateChange(ctx context.Context, connectionObj *allocation.AllocationChangeCollector) error { + connectionObj.AddChange(cmd.allocationChange, cmd.changeProcessor) + + return connectionObj.Save(ctx) +} + +// ProcessContent flush file to FileStorage +func (cmd *FileCommandDelete) ProcessContent(ctx context.Context, req *http.Request, allocationObj *allocation.Allocation, connectionObj *allocation.AllocationChangeCollector) (blobberhttp.UploadResult, error) { + + deleteSize := cmd.exisitingFileRef.Size + + cmd.changeProcessor = &allocation.DeleteFileChange{ConnectionID: connectionObj.ConnectionID, + AllocationID: connectionObj.AllocationID, Name: cmd.exisitingFileRef.Name, + Hash: cmd.exisitingFileRef.Hash, Path: cmd.exisitingFileRef.Path, Size: deleteSize} + + result := blobberhttp.UploadResult{} + result.Filename = cmd.exisitingFileRef.Name + result.Hash = cmd.exisitingFileRef.Hash + result.MerkleRoot = cmd.exisitingFileRef.MerkleRoot + result.Size = cmd.exisitingFileRef.Size + + cmd.allocationChange = &allocation.AllocationChange{} + cmd.allocationChange.ConnectionID = connectionObj.ConnectionID + cmd.allocationChange.Size = 0 - deleteSize + cmd.allocationChange.Operation = constants.FileOperationDelete + + connectionObj.Size += cmd.allocationChange.Size + + return result, nil + +} + +// ProcessThumbnail no thumbnail should be processed for delete. A deffered delete command has been added on ProcessContent +func (cmd *FileCommandDelete) ProcessThumbnail(ctx context.Context, req *http.Request, allocationObj *allocation.Allocation, connectionObj *allocation.AllocationChangeCollector) error { + //DO NOTHING + return nil +} diff --git a/code/go/0chain.net/blobbercore/handler/file_command_insert.go b/code/go/0chain.net/blobbercore/handler/file_command_insert.go new file mode 100644 index 000000000..81e5de259 --- /dev/null +++ b/code/go/0chain.net/blobbercore/handler/file_command_insert.go @@ -0,0 +1,139 @@ +package handler + +import ( + "context" + "encoding/json" + "net/http" + + "github.com/0chain/blobber/code/go/0chain.net/blobbercore/allocation" + "github.com/0chain/blobber/code/go/0chain.net/blobbercore/blobberhttp" + "github.com/0chain/blobber/code/go/0chain.net/blobbercore/config" + "github.com/0chain/blobber/code/go/0chain.net/blobbercore/filestore" + "github.com/0chain/blobber/code/go/0chain.net/blobbercore/reference" + "github.com/0chain/blobber/code/go/0chain.net/core/common" + "github.com/0chain/gosdk/constants" + "github.com/0chain/gosdk/zboxcore/fileref" +) + +// InsertFileCommand command for inserting file +type InsertFileCommand struct { + allocationChange *allocation.AllocationChange + changeProcessor *allocation.UpdateFileChanger +} + +// IsAuthorized validate request. 
+func (cmd *InsertFileCommand) IsAuthorized(ctx context.Context, req *http.Request, allocationObj *allocation.Allocation, clientID string) error { + + if allocationObj.OwnerID != clientID && allocationObj.RepairerID != clientID { + return common.NewError("invalid_operation", "Operation needs to be performed by the owner or the payer of the allocation") + } + + changeProcessor := &allocation.UpdateFileChanger{} + + uploadMetaString := req.FormValue("uploadMeta") + err := json.Unmarshal([]byte(uploadMetaString), changeProcessor) + if err != nil { + return common.NewError("invalid_parameters", + "Invalid parameters. Error parsing the meta data for upload."+err.Error()) + } + exisitingFileRef, _ := reference.GetReference(ctx, allocationObj.ID, changeProcessor.Path) + + if exisitingFileRef != nil { + return common.NewError("duplicate_file", "File at path already exists") + } + + if changeProcessor.ChunkSize <= 0 { + changeProcessor.ChunkSize = fileref.CHUNK_SIZE + } + + cmd.changeProcessor = changeProcessor + + return nil +} + +// ProcessContent flush file to FileStorage +func (cmd *InsertFileCommand) ProcessContent(ctx context.Context, req *http.Request, allocationObj *allocation.Allocation, connectionObj *allocation.AllocationChangeCollector) (blobberhttp.UploadResult, error) { + + result := blobberhttp.UploadResult{} + + origfile, _, err := req.FormFile("uploadFile") + if err != nil { + return result, common.NewError("invalid_parameters", "Error Reading multi parts for file."+err.Error()) + } + defer origfile.Close() + + fileInputData := &filestore.FileInputData{Name: cmd.changeProcessor.Filename, Path: cmd.changeProcessor.Path, OnCloud: false} + fileOutputData, err := filestore.GetFileStore().WriteFile(allocationObj.ID, fileInputData, origfile, connectionObj.ConnectionID) + if err != nil { + return result, common.NewError("upload_error", "Failed to upload the file. 
"+err.Error()) + } + + result.Filename = cmd.changeProcessor.Filename + result.Hash = fileOutputData.ContentHash + result.MerkleRoot = fileOutputData.MerkleRoot + result.Size = fileOutputData.Size + + if len(cmd.changeProcessor.Hash) > 0 && cmd.changeProcessor.Hash != fileOutputData.ContentHash { + return result, common.NewError("content_hash_mismatch", "Content hash provided in the meta data does not match the file content") + } + if len(cmd.changeProcessor.MerkleRoot) > 0 && cmd.changeProcessor.MerkleRoot != fileOutputData.MerkleRoot { + return result, common.NewError("content_merkle_root_mismatch", "Merkle root provided in the meta data does not match the file content") + } + if fileOutputData.Size > config.Configuration.MaxFileSize { + return result, common.NewError("file_size_limit_exceeded", "Size for the given file is larger than the max limit") + } + + cmd.changeProcessor.Hash = fileOutputData.ContentHash + cmd.changeProcessor.MerkleRoot = fileOutputData.MerkleRoot + cmd.changeProcessor.AllocationID = allocationObj.ID + cmd.changeProcessor.Size = fileOutputData.Size + + allocationSize := fileOutputData.Size + + if allocationObj.BlobberSizeUsed+allocationSize > allocationObj.BlobberSize { + return result, common.NewError("max_allocation_size", "Max size reached for the allocation with this blobber") + } + + cmd.allocationChange = &allocation.AllocationChange{} + cmd.allocationChange.ConnectionID = connectionObj.ConnectionID + cmd.allocationChange.Size = allocationSize + cmd.allocationChange.Operation = constants.FileOperationInsert + + connectionObj.Size += cmd.allocationChange.Size + + return result, nil + +} + +// ProcessThumbnail flush thumbnail file to FileStorage if it has. +func (cmd *InsertFileCommand) ProcessThumbnail(ctx context.Context, req *http.Request, allocationObj *allocation.Allocation, connectionObj *allocation.AllocationChangeCollector) error { + + thumbfile, thumbHeader, _ := req.FormFile("uploadThumbnailFile") + + if thumbHeader != nil { + + defer thumbfile.Close() + + thumbInputData := &filestore.FileInputData{Name: thumbHeader.Filename, Path: cmd.changeProcessor.Path} + thumbOutputData, err := filestore.GetFileStore().WriteFile(allocationObj.ID, thumbInputData, thumbfile, connectionObj.ConnectionID) + if err != nil { + return common.NewError("upload_error", "Failed to upload the thumbnail. 
"+err.Error()) + } + if len(cmd.changeProcessor.ThumbnailHash) > 0 && cmd.changeProcessor.ThumbnailHash != thumbOutputData.ContentHash { + return common.NewError("content_hash_mismatch", "Content hash provided in the meta data does not match the thumbnail content") + } + + cmd.changeProcessor.ThumbnailHash = thumbOutputData.ContentHash + cmd.changeProcessor.ThumbnailSize = thumbOutputData.Size + cmd.changeProcessor.ThumbnailFilename = thumbInputData.Name + } + + return nil + +} + +// UpdateChange add NewFileChange in db +func (cmd *InsertFileCommand) UpdateChange(ctx context.Context, connectionObj *allocation.AllocationChangeCollector) error { + connectionObj.AddChange(cmd.allocationChange, cmd.changeProcessor) + return connectionObj.Save(ctx) +} diff --git a/code/go/0chain.net/blobbercore/handler/file_command_update.go b/code/go/0chain.net/blobbercore/handler/file_command_update.go new file mode 100644 index 000000000..fbae2b6f6 --- /dev/null +++ b/code/go/0chain.net/blobbercore/handler/file_command_update.go @@ -0,0 +1,200 @@ +package handler + +import ( + "context" + "encoding/json" + "net/http" + + "github.com/0chain/blobber/code/go/0chain.net/blobbercore/allocation" + "github.com/0chain/blobber/code/go/0chain.net/blobbercore/blobberhttp" + "github.com/0chain/blobber/code/go/0chain.net/blobbercore/config" + "github.com/0chain/blobber/code/go/0chain.net/blobbercore/filestore" + "github.com/0chain/blobber/code/go/0chain.net/blobbercore/reference" + "github.com/0chain/blobber/code/go/0chain.net/core/common" + "github.com/0chain/blobber/code/go/0chain.net/core/logging" + "github.com/0chain/gosdk/constants" + sdkConstants "github.com/0chain/gosdk/constants" + "github.com/0chain/gosdk/zboxcore/fileref" + "go.uber.org/zap" +) + +// UpdateFileCommand command for updating file +type UpdateFileCommand struct { + exisitingFileRef *reference.Ref + fileChanger *allocation.UpdateFileChanger + allocationChange *allocation.AllocationChange +} + +// IsAuthorized validate request. +func (cmd *UpdateFileCommand) IsAuthorized(ctx context.Context, req *http.Request, allocationObj *allocation.Allocation, clientID string) error { + uploadMetaString := req.FormValue("uploadMeta") + + if uploadMetaString == "" { + // backward compatibility for old update request + uploadMetaString = req.FormValue("updatedMeta") + } + + err := json.Unmarshal([]byte(uploadMetaString), &cmd.fileChanger) + if err != nil { + return common.NewError("invalid_parameters", + "Invalid parameters. 
Error parsing the meta data for upload."+err.Error()) + } + + if cmd.fileChanger.ChunkSize <= 0 { + cmd.fileChanger.ChunkSize = fileref.CHUNK_SIZE + } + + cmd.exisitingFileRef, _ = reference.GetReference(ctx, allocationObj.ID, cmd.fileChanger.Path) + + if cmd.exisitingFileRef == nil { + return common.NewError("invalid_file_update", "File at path does not exist for update") + } + + if allocationObj.OwnerID != clientID && + allocationObj.RepairerID != clientID && + !reference.IsACollaborator(ctx, cmd.exisitingFileRef.ID, clientID) { + return common.NewError("invalid_operation", "Operation needs to be performed by the owner, collaborator or the payer of the allocation") + } + + return nil +} + +// ProcessContent flush file to FileStorage +func (cmd *UpdateFileCommand) ProcessContent(ctx context.Context, req *http.Request, allocationObj *allocation.Allocation, connectionObj *allocation.AllocationChangeCollector) (blobberhttp.UploadResult, error) { + + result := blobberhttp.UploadResult{} + + origfile, _, err := req.FormFile("uploadFile") + if err != nil { + return result, common.NewError("invalid_parameters", "Error Reading multi parts for file."+err.Error()) + } + defer origfile.Close() + + cmd.reloadChange(connectionObj) + + fileInputData := &filestore.FileInputData{ + Name: cmd.fileChanger.Filename, + Path: cmd.fileChanger.Path, + OnCloud: cmd.exisitingFileRef.OnCloud, + + UploadOffset: cmd.fileChanger.UploadOffset, + IsChunked: cmd.fileChanger.ChunkSize > 0, + IsFinal: cmd.fileChanger.IsFinal, + } + fileOutputData, err := filestore.GetFileStore().WriteFile(allocationObj.ID, fileInputData, origfile, connectionObj.ConnectionID) + if err != nil { + return result, common.NewError("upload_error", "Failed to upload the file. "+err.Error()) + } + + result.Filename = cmd.fileChanger.Filename + result.Hash = fileOutputData.ContentHash + //result.MerkleRoot = fileOutputData.MerkleRoot + result.Size = fileOutputData.Size + + allocationSize := connectionObj.Size + + // only update connection size when the chunk is uploaded by first time. 
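[Editorial note: for an update the charged size is a delta rather than an absolute value: the allocation already accounts for the existing file, so the change recorded on the connection (computed in the code that follows) is the newly written size minus the size of the file being replaced, applied once the final chunk arrives. A toy illustration of that arithmetic, independent of the blobber types:

package main

import "fmt"

// updateDelta returns how much extra space an in-place update consumes:
// the size of the replacement content minus the size of the file it replaces.
// A smaller replacement yields a negative delta, freeing allocation space.
func updateDelta(newSize, existingSize int64) int64 {
	return newSize - existingSize
}

func main() {
	fmt.Println(updateDelta(3*65536, 2*65536)) // 65536: the file grew by one chunk
	fmt.Println(updateDelta(65536, 2*65536))   // -65536: the file shrank
}

End of note.]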
+ if !fileOutputData.ChunkUploaded { + allocationSize += fileOutputData.Size + } + + if len(cmd.fileChanger.ChunkHash) > 0 && cmd.fileChanger.ChunkHash != fileOutputData.ContentHash { + return result, common.NewError("content_hash_mismatch", "Content hash provided in the meta data does not match the file content") + } + + // if len(cmd.fileChanger.MerkleRoot) > 0 && cmd.fileChanger.MerkleRoot != fileOutputData.MerkleRoot { + // return result, common.NewError("content_merkle_root_mismatch", "Merkle root provided in the meta data does not match the file content") + // } + + if allocationObj.BlobberSizeUsed+(allocationSize-cmd.exisitingFileRef.Size) > allocationObj.BlobberSize { + return result, common.NewError("max_allocation_size", "Max size reached for the allocation with this blobber") + } + + if fileOutputData.Size > config.Configuration.MaxFileSize { + return result, common.NewError("file_size_limit_exceeded", "Size for the given file is larger than the max limit") + } + + cmd.fileChanger.AllocationID = allocationObj.ID + cmd.fileChanger.Size = allocationSize + + cmd.allocationChange = &allocation.AllocationChange{} + cmd.allocationChange.ConnectionID = connectionObj.ConnectionID + cmd.allocationChange.Size = allocationSize - cmd.exisitingFileRef.Size + cmd.allocationChange.Operation = sdkConstants.FileOperationUpdate + + if cmd.fileChanger.IsFinal { + connectionObj.Size = allocationSize - cmd.exisitingFileRef.Size + } else { + connectionObj.Size = allocationSize + } + + return result, nil + +} + +// ProcessThumbnail flush thumbnail file to FileStorage if it has. +func (cmd *UpdateFileCommand) ProcessThumbnail(ctx context.Context, req *http.Request, allocationObj *allocation.Allocation, connectionObj *allocation.AllocationChangeCollector) error { + + thumbfile, thumbHeader, _ := req.FormFile("uploadThumbnailFile") + + if thumbHeader != nil { + + defer thumbfile.Close() + + thumbInputData := &filestore.FileInputData{Name: thumbHeader.Filename, Path: cmd.fileChanger.Path} + thumbOutputData, err := filestore.GetFileStore().WriteFile(allocationObj.ID, thumbInputData, thumbfile, connectionObj.ConnectionID) + if err != nil { + return common.NewError("upload_error", "Failed to upload the thumbnail. 
"+err.Error()) + } + if len(cmd.fileChanger.ThumbnailHash) > 0 && cmd.fileChanger.ThumbnailHash != thumbOutputData.ContentHash { + return common.NewError("content_hash_mismatch", "Content hash provided in the meta data does not match the thumbnail content") + } + cmd.fileChanger.ThumbnailHash = thumbOutputData.ContentHash + cmd.fileChanger.ThumbnailSize = thumbOutputData.Size + cmd.fileChanger.ThumbnailFilename = thumbInputData.Name + } + + return nil + +} + +func (cmd *UpdateFileCommand) reloadChange(connectionObj *allocation.AllocationChangeCollector) { + for _, c := range connectionObj.Changes { + if c.Operation == constants.FileOperationUpdate { + + dbFileChanger := &allocation.AddFileChanger{} + + err := dbFileChanger.Unmarshal(c.Input) + if err != nil { + logging.Logger.Error("reloadChange", zap.Error(err)) + } + + // reload uploaded size from db, it was chunk size from client + cmd.fileChanger.Size = dbFileChanger.Size + return + } + } +} + +// UpdateChange add UpdateFileChanger in db +func (cmd *UpdateFileCommand) UpdateChange(ctx context.Context, connectionObj *allocation.AllocationChangeCollector) error { + for _, c := range connectionObj.Changes { + if c.Operation == constants.FileOperationUpdate { + c.Size = connectionObj.Size + c.Input, _ = cmd.fileChanger.Marshal() + + //c.ModelWithTS.UpdatedAt = time.Now() + err := connectionObj.Save(ctx) + if err != nil { + return err + } + + return c.Save(ctx) + } + } + + //NOT FOUND + connectionObj.AddChange(cmd.allocationChange, cmd.fileChanger) + + return connectionObj.Save(ctx) +} diff --git a/code/go/0chain.net/blobbercore/handler/file_command_update_bak.go b/code/go/0chain.net/blobbercore/handler/file_command_update_bak.go new file mode 100644 index 000000000..6bc66f690 --- /dev/null +++ b/code/go/0chain.net/blobbercore/handler/file_command_update_bak.go @@ -0,0 +1,138 @@ +package handler + +import ( + "context" + "encoding/json" + "net/http" + + "github.com/0chain/blobber/code/go/0chain.net/blobbercore/allocation" + "github.com/0chain/blobber/code/go/0chain.net/blobbercore/blobberhttp" + "github.com/0chain/blobber/code/go/0chain.net/blobbercore/config" + "github.com/0chain/blobber/code/go/0chain.net/blobbercore/filestore" + "github.com/0chain/blobber/code/go/0chain.net/blobbercore/reference" + "github.com/0chain/blobber/code/go/0chain.net/core/common" + "github.com/0chain/gosdk/constants" + "github.com/0chain/gosdk/zboxcore/fileref" +) + +// UpdateFileCMD command for updating file +type UpdateFileCMD struct { + exisitingFileRef *reference.Ref + changeProcessor *allocation.UpdateFileChanger + allocationChange *allocation.AllocationChange +} + +// IsAuthorized validate request. +func (cmd *UpdateFileCMD) IsAuthorized(ctx context.Context, req *http.Request, allocationObj *allocation.Allocation, clientID string) error { + uploadMetaString := req.FormValue("updateMeta") + err := json.Unmarshal([]byte(uploadMetaString), &cmd.changeProcessor) + if err != nil { + return common.NewError("invalid_parameters", + "Invalid parameters. 
Error parsing the meta data for upload."+err.Error()) + } + + if cmd.changeProcessor.ChunkSize <= 0 { + cmd.changeProcessor.ChunkSize = fileref.CHUNK_SIZE + } + + cmd.exisitingFileRef, _ = reference.GetReference(ctx, allocationObj.ID, cmd.changeProcessor.Path) + + if cmd.exisitingFileRef == nil { + return common.NewError("invalid_file_update", "File at path does not exist for update") + } + + if allocationObj.OwnerID != clientID && + allocationObj.RepairerID != clientID && + !reference.IsACollaborator(ctx, cmd.exisitingFileRef.ID, clientID) { + return common.NewError("invalid_operation", "Operation needs to be performed by the owner, collaborator or the payer of the allocation") + } + + return nil +} + +// ProcessContent flush file to FileStorage +func (cmd *UpdateFileCMD) ProcessContent(ctx context.Context, req *http.Request, allocationObj *allocation.Allocation, connectionObj *allocation.AllocationChangeCollector) (blobberhttp.UploadResult, error) { + + result := blobberhttp.UploadResult{} + + origfile, _, err := req.FormFile("uploadFile") + if err != nil { + return result, common.NewError("invalid_parameters", "Error Reading multi parts for file."+err.Error()) + } + defer origfile.Close() + + fileInputData := &filestore.FileInputData{Name: cmd.changeProcessor.Filename, Path: cmd.changeProcessor.Path, OnCloud: cmd.exisitingFileRef.OnCloud} + fileOutputData, err := filestore.GetFileStore().WriteFile(allocationObj.ID, fileInputData, origfile, connectionObj.ConnectionID) + if err != nil { + return result, common.NewError("upload_error", "Failed to upload the file. "+err.Error()) + } + + result.Filename = cmd.changeProcessor.Filename + result.Hash = fileOutputData.ContentHash + result.MerkleRoot = fileOutputData.MerkleRoot + result.Size = fileOutputData.Size + + if len(cmd.changeProcessor.Hash) > 0 && cmd.changeProcessor.Hash != fileOutputData.ContentHash { + return result, common.NewError("content_hash_mismatch", "Content hash provided in the meta data does not match the file content") + } + if len(cmd.changeProcessor.MerkleRoot) > 0 && cmd.changeProcessor.MerkleRoot != fileOutputData.MerkleRoot { + return result, common.NewError("content_merkle_root_mismatch", "Merkle root provided in the meta data does not match the file content") + } + if fileOutputData.Size > config.Configuration.MaxFileSize { + return result, common.NewError("file_size_limit_exceeded", "Size for the given file is larger than the max limit") + } + + cmd.changeProcessor.Hash = fileOutputData.ContentHash + cmd.changeProcessor.MerkleRoot = fileOutputData.MerkleRoot + cmd.changeProcessor.AllocationID = allocationObj.ID + cmd.changeProcessor.Size = fileOutputData.Size + + allocationSize := fileOutputData.Size + + if allocationObj.BlobberSizeUsed+(allocationSize-cmd.exisitingFileRef.Size) > allocationObj.BlobberSize { + return result, common.NewError("max_allocation_size", "Max size reached for the allocation with this blobber") + } + + cmd.allocationChange = &allocation.AllocationChange{} + cmd.allocationChange.ConnectionID = connectionObj.ConnectionID + cmd.allocationChange.Size = allocationSize - cmd.exisitingFileRef.Size + cmd.allocationChange.Operation = constants.FileOperationUpdate + + connectionObj.Size += cmd.allocationChange.Size + + return result, nil + +} + +// ProcessThumbnail flush thumbnail file to FileStorage if it has. 
+func (cmd *UpdateFileCMD) ProcessThumbnail(ctx context.Context, req *http.Request, allocationObj *allocation.Allocation, connectionObj *allocation.AllocationChangeCollector) error { + + thumbfile, thumbHeader, _ := req.FormFile("uploadThumbnailFile") + + if thumbHeader != nil { + + defer thumbfile.Close() + + thumbInputData := &filestore.FileInputData{Name: thumbHeader.Filename, Path: cmd.changeProcessor.Path} + thumbOutputData, err := filestore.GetFileStore().WriteFile(allocationObj.ID, thumbInputData, thumbfile, connectionObj.ConnectionID) + if err != nil { + return common.NewError("upload_error", "Failed to upload the thumbnail. "+err.Error()) + } + if len(cmd.changeProcessor.ThumbnailHash) > 0 && cmd.changeProcessor.ThumbnailHash != thumbOutputData.ContentHash { + return common.NewError("content_hash_mismatch", "Content hash provided in the meta data does not match the thumbnail content") + } + cmd.changeProcessor.ThumbnailHash = thumbOutputData.ContentHash + cmd.changeProcessor.ThumbnailSize = thumbOutputData.Size + cmd.changeProcessor.ThumbnailFilename = thumbInputData.Name + } + + return nil + +} + +// UpdateChange add UpdateFileChanger in db +func (cmd *UpdateFileCMD) UpdateChange(ctx context.Context, connectionObj *allocation.AllocationChangeCollector) error { + connectionObj.AddChange(cmd.allocationChange, cmd.changeProcessor) + + return connectionObj.Save(ctx) +} diff --git a/code/go/0chain.net/blobbercore/handler/handler.go b/code/go/0chain.net/blobbercore/handler/handler.go index 80f7e4439..460bcd98c 100644 --- a/code/go/0chain.net/blobbercore/handler/handler.go +++ b/code/go/0chain.net/blobbercore/handler/handler.go @@ -19,17 +19,17 @@ import ( "go.uber.org/zap" "github.com/0chain/blobber/code/go/0chain.net/blobbercore/config" - "github.com/0chain/blobber/code/go/0chain.net/blobbercore/constants" "github.com/0chain/blobber/code/go/0chain.net/blobbercore/datastore" "github.com/0chain/blobber/code/go/0chain.net/blobbercore/stats" "github.com/0chain/blobber/code/go/0chain.net/core/common" . 
"github.com/0chain/blobber/code/go/0chain.net/core/logging" + "github.com/0chain/gosdk/constants" "github.com/gorilla/mux" ) var storageHandler StorageHandler -func GetMetaDataStore() *datastore.Store { +func GetMetaDataStore() datastore.Store { return datastore.GetStore() } @@ -114,14 +114,14 @@ func WithConnection(handler common.JSONResponderF) common.JSONResponderF { func setupHandlerContext(ctx context.Context, r *http.Request) context.Context { var vars = mux.Vars(r) - ctx = context.WithValue(ctx, constants.CLIENT_CONTEXT_KEY, + ctx = context.WithValue(ctx, constants.ContextKeyClient, r.Header.Get(common.ClientHeader)) - ctx = context.WithValue(ctx, constants.CLIENT_KEY_CONTEXT_KEY, + ctx = context.WithValue(ctx, constants.ContextKeyClientKey, r.Header.Get(common.ClientKeyHeader)) - ctx = context.WithValue(ctx, constants.ALLOCATION_CONTEXT_KEY, + ctx = context.WithValue(ctx, constants.ContextKeyAllocation, vars["allocation"]) // signature is not requered for all requests, but if header is empty it won`t affect anything - ctx = context.WithValue(ctx, constants.CLIENT_SIGNATURE_HEADER_KEY, r.Header.Get(common.ClientSignatureHeader)) + ctx = context.WithValue(ctx, constants.ContextKeyClientSignatureHeaderKey, r.Header.Get(common.ClientSignatureHeader)) return ctx } @@ -354,7 +354,7 @@ func CleanupDiskHandler(ctx context.Context, r *http.Request) (interface{}, erro func RevokeShare(ctx context.Context, r *http.Request) (interface{}, error) { ctx = setupHandlerContext(ctx, r) - allocationID := ctx.Value(constants.ALLOCATION_CONTEXT_KEY).(string) + allocationID := ctx.Value(constants.ContextKeyAllocation).(string) allocationObj, err := storageHandler.verifyAllocation(ctx, allocationID, true) if err != nil { return nil, common.NewError("invalid_parameters", "Invalid allocation id passed."+err.Error()) @@ -377,7 +377,7 @@ func RevokeShare(ctx context.Context, r *http.Request) (interface{}, error) { if err != nil { return nil, common.NewError("invalid_parameters", "Invalid file path. 
"+err.Error()) } - clientID := ctx.Value(constants.CLIENT_CONTEXT_KEY).(string) + clientID := ctx.Value(constants.ContextKeyClient).(string) if clientID != allocationObj.OwnerID { return nil, common.NewError("invalid_operation", "Operation needs to be performed by the owner of the allocation") } @@ -405,7 +405,7 @@ func RevokeShare(ctx context.Context, r *http.Request) (interface{}, error) { func InsertShare(ctx context.Context, r *http.Request) (interface{}, error) { ctx = setupHandlerContext(ctx, r) - allocationID := ctx.Value(constants.ALLOCATION_CONTEXT_KEY).(string) + allocationID := ctx.Value(constants.ContextKeyAllocation).(string) allocationObj, err := storageHandler.verifyAllocation(ctx, allocationID, true) if err != nil { return nil, common.NewError("invalid_parameters", "Invalid allocation id passed."+err.Error()) diff --git a/code/go/0chain.net/blobbercore/handler/handler_integration_tests.go b/code/go/0chain.net/blobbercore/handler/handler_integration_tests.go index 9f67e0ab5..f5f1f4d6a 100644 --- a/code/go/0chain.net/blobbercore/handler/handler_integration_tests.go +++ b/code/go/0chain.net/blobbercore/handler/handler_integration_tests.go @@ -14,11 +14,11 @@ import ( "runtime/pprof" "github.com/0chain/blobber/code/go/0chain.net/blobbercore/config" - "github.com/0chain/blobber/code/go/0chain.net/blobbercore/constants" "github.com/0chain/blobber/code/go/0chain.net/blobbercore/datastore" "github.com/0chain/blobber/code/go/0chain.net/blobbercore/stats" "github.com/0chain/blobber/code/go/0chain.net/core/common" "github.com/0chain/blobber/code/go/0chain.net/core/node" + "github.com/0chain/gosdk/constants" "github.com/gorilla/mux" @@ -28,7 +28,7 @@ import ( var storageHandler StorageHandler -func GetMetaDataStore() *datastore.Store { +func GetMetaDataStore() datastore.Store { return datastore.GetStore() } @@ -96,11 +96,11 @@ func WithConnection(handler common.JSONResponderF) common.JSONResponderF { func setupHandlerContext(ctx context.Context, r *http.Request) context.Context { var vars = mux.Vars(r) - ctx = context.WithValue(ctx, constants.CLIENT_CONTEXT_KEY, + ctx = context.WithValue(ctx, constants.ContextKeyClient, r.Header.Get(common.ClientHeader)) - ctx = context.WithValue(ctx, constants.CLIENT_KEY_CONTEXT_KEY, + ctx = context.WithValue(ctx, constants.ContextKeyClientKey, r.Header.Get(common.ClientKeyHeader)) - ctx = context.WithValue(ctx, constants.ALLOCATION_CONTEXT_KEY, + ctx = context.WithValue(ctx, constants.ContextKeyAllocation, vars["allocation"]) return ctx } diff --git a/code/go/0chain.net/blobbercore/handler/handler_test.go b/code/go/0chain.net/blobbercore/handler/handler_test.go index 25aa3f045..7d0a3ecd2 100644 --- a/code/go/0chain.net/blobbercore/handler/handler_test.go +++ b/code/go/0chain.net/blobbercore/handler/handler_test.go @@ -50,8 +50,7 @@ func (MockFileBlockGetter) GetFileBlock( allocationID string, fileData *filestore.FileInputData, blockNum int64, - numBlocks int64, -) ([]byte, error) { + numBlocks int64) ([]byte, error) { return []byte(mockFileBlock), nil } @@ -68,7 +67,7 @@ var encscheme zencryption.EncryptionScheme func setupEncryptionScheme() { encscheme = zencryption.NewEncryptionScheme() mnemonic := client.GetClient().Mnemonic - if err := encscheme.Initialize(mnemonic); err != nil { + if _, err := encscheme.Initialize(mnemonic); err != nil { panic("initialize encscheme") } encscheme.InitForEncryption("filetype:audio") @@ -1014,7 +1013,7 @@ func TestHandlers_Requiring_Signature(t *testing.T) { } q := url.Query() - formFieldByt, err := 
json.Marshal(&allocation.UpdateFileChange{}) + formFieldByt, err := json.Marshal(&allocation.UpdateFileChanger{}) if err != nil { t.Fatal(err) } diff --git a/code/go/0chain.net/blobbercore/handler/helper_integration_test.go b/code/go/0chain.net/blobbercore/handler/helper_integration_test.go index 6a5f0e18c..3a744b396 100644 --- a/code/go/0chain.net/blobbercore/handler/helper_integration_test.go +++ b/code/go/0chain.net/blobbercore/handler/helper_integration_test.go @@ -4,13 +4,13 @@ import ( "context" "database/sql" "fmt" - blobbergrpc "github.com/0chain/blobber/code/go/0chain.net/blobbercore/blobbergrpc/proto" "log" "math/rand" "os" "strings" "time" + blobbergrpc "github.com/0chain/blobber/code/go/0chain.net/blobbercore/blobbergrpc/proto" "google.golang.org/grpc" "gorm.io/driver/postgres" diff --git a/code/go/0chain.net/blobbercore/handler/object_operation_handler.go b/code/go/0chain.net/blobbercore/handler/object_operation_handler.go index b2c1b26de..e1bf32d2d 100644 --- a/code/go/0chain.net/blobbercore/handler/object_operation_handler.go +++ b/code/go/0chain.net/blobbercore/handler/object_operation_handler.go @@ -20,7 +20,6 @@ import ( "github.com/0chain/blobber/code/go/0chain.net/blobbercore/allocation" "github.com/0chain/blobber/code/go/0chain.net/blobbercore/config" - "github.com/0chain/blobber/code/go/0chain.net/blobbercore/constants" "github.com/0chain/blobber/code/go/0chain.net/blobbercore/datastore" "github.com/0chain/blobber/code/go/0chain.net/blobbercore/filestore" "github.com/0chain/blobber/code/go/0chain.net/blobbercore/readmarker" @@ -30,6 +29,7 @@ import ( "github.com/0chain/blobber/code/go/0chain.net/core/encryption" "github.com/0chain/blobber/code/go/0chain.net/core/lock" "github.com/0chain/blobber/code/go/0chain.net/core/node" + "github.com/0chain/gosdk/constants" zfileref "github.com/0chain/gosdk/zboxcore/fileref" "gorm.io/datatypes" @@ -185,9 +185,9 @@ func (fsh *StorageHandler) DownloadFile( // get client and allocation ids var ( - clientID = ctx.Value(constants.CLIENT_CONTEXT_KEY).(string) - allocationTx = ctx.Value(constants.ALLOCATION_CONTEXT_KEY).(string) - _ = ctx.Value(constants.CLIENT_KEY_CONTEXT_KEY).(string) // runtime type check + clientID = ctx.Value(constants.ContextKeyClient).(string) + allocationTx = ctx.Value(constants.ContextKeyAllocation).(string) + _ = ctx.Value(constants.ContextKeyClientKey).(string) // runtime type check alloc *allocation.Allocation ) @@ -396,6 +396,7 @@ func (fsh *StorageHandler) DownloadFile( fileData.Path = fileref.Path fileData.Hash = fileref.ThumbnailHash fileData.OnCloud = fileref.OnCloud + fileData.ChunkSize = fileref.ChunkSize respData, err = filestore.GetFileStore().GetFileBlock(alloc.ID, fileData, blockNum, numBlocks) if err != nil { @@ -408,6 +409,8 @@ func (fsh *StorageHandler) DownloadFile( fileData.Path = fileref.Path fileData.Hash = fileref.ContentHash fileData.OnCloud = fileref.OnCloud + fileData.ChunkSize = fileref.ChunkSize + respData, err = filestore.GetFileStore().GetFileBlock(alloc.ID, fileData, blockNum, numBlocks) if err != nil { @@ -451,7 +454,7 @@ func (fsh *StorageHandler) DownloadFile( // reEncrypt does not require pub / private key, // we could probably make it a classless function - if err := encscheme.Initialize(""); err != nil { + if _, err := encscheme.Initialize(""); err != nil { return nil, err } if err := encscheme.InitForDecryption("filetype:audio", fileref.EncryptedKey); err != nil { @@ -502,9 +505,9 @@ func (fsh *StorageHandler) CommitWrite(ctx context.Context, r *http.Request) (*b if r.Method 
== "GET" { return nil, common.NewError("invalid_method", "Invalid method used for the upload URL. Use POST instead") } - allocationTx := ctx.Value(constants.ALLOCATION_CONTEXT_KEY).(string) - clientID := ctx.Value(constants.CLIENT_CONTEXT_KEY).(string) - clientKey := ctx.Value(constants.CLIENT_KEY_CONTEXT_KEY).(string) + allocationTx := ctx.Value(constants.ContextKeyAllocation).(string) + clientID := ctx.Value(constants.ContextKeyClient).(string) + clientKey := ctx.Value(constants.ContextKeyClientKey).(string) clientKeyBytes, _ := hex.DecodeString(clientKey) allocationObj, err := fsh.verifyAllocation(ctx, allocationTx, false) @@ -539,8 +542,8 @@ func (fsh *StorageHandler) CommitWrite(ctx context.Context, r *http.Request) (*b var isCollaborator bool for _, change := range connectionObj.Changes { - if change.Operation == allocation.UPDATE_OPERATION { - updateFileChange := new(allocation.UpdateFileChange) + if change.Operation == constants.FileOperationUpdate { + updateFileChange := new(allocation.UpdateFileChanger) if err := updateFileChange.Unmarshal(change.Input); err != nil { return nil, err } @@ -624,6 +627,7 @@ func (fsh *StorageHandler) CommitWrite(ctx context.Context, r *http.Request) (*b if err != nil { return nil, err } + allocationRoot := encryption.Hash(rootRef.Hash + ":" + strconv.FormatInt(int64(writeMarker.Timestamp), 10)) if allocationRoot != writeMarker.AllocationRoot { @@ -677,7 +681,7 @@ func (fsh *StorageHandler) RenameObject(ctx context.Context, r *http.Request) (i if r.Method == "GET" { return nil, common.NewError("invalid_method", "Invalid method used. Use POST instead") } - allocationTx := ctx.Value(constants.ALLOCATION_CONTEXT_KEY).(string) + allocationTx := ctx.Value(constants.ContextKeyAllocation).(string) allocationObj, err := fsh.verifyAllocation(ctx, allocationTx, false) if err != nil { return nil, common.NewError("invalid_parameters", "Invalid allocation id passed."+err.Error()) @@ -689,8 +693,8 @@ func (fsh *StorageHandler) RenameObject(ctx context.Context, r *http.Request) (i allocationID := allocationObj.ID - clientID := ctx.Value(constants.CLIENT_CONTEXT_KEY).(string) - _ = ctx.Value(constants.CLIENT_KEY_CONTEXT_KEY).(string) + clientID := ctx.Value(constants.ContextKeyClient).(string) + _ = ctx.Value(constants.ContextKeyClientKey).(string) valid, err := verifySignatureFromRequest(allocationTx, r.Header.Get(common.ClientSignatureHeader), allocationObj.OwnerPublicKey) if !valid || err != nil { @@ -738,7 +742,7 @@ func (fsh *StorageHandler) RenameObject(ctx context.Context, r *http.Request) (i allocationChange := &allocation.AllocationChange{} allocationChange.ConnectionID = connectionObj.ConnectionID allocationChange.Size = 0 - allocationChange.Operation = allocation.RENAME_OPERATION + allocationChange.Operation = constants.FileOperationRename dfc := &allocation.RenameFileChange{ConnectionID: connectionObj.ConnectionID, AllocationID: connectionObj.AllocationID, Path: objectRef.Path} dfc.NewName = new_name @@ -769,8 +773,8 @@ func (fsh *StorageHandler) UpdateObjectAttributes(ctx context.Context, } var ( - allocTx = ctx.Value(constants.ALLOCATION_CONTEXT_KEY).(string) - clientID = ctx.Value(constants.CLIENT_CONTEXT_KEY).(string) + allocTx = ctx.Value(constants.ContextKeyAllocation).(string) + clientID = ctx.Value(constants.ContextKeyClient).(string) alloc *allocation.Allocation ) @@ -790,7 +794,7 @@ func (fsh *StorageHandler) UpdateObjectAttributes(ctx context.Context, } // runtime type check - _ = ctx.Value(constants.CLIENT_KEY_CONTEXT_KEY).(string) + _ = 
ctx.Value(constants.ContextKeyClientKey).(string) if clientID == "" { return nil, common.NewError("update_object_attributes", @@ -847,7 +851,7 @@ func (fsh *StorageHandler) UpdateObjectAttributes(ctx context.Context, var change = new(allocation.AllocationChange) change.ConnectionID = conn.ConnectionID - change.Operation = allocation.UPDATE_ATTRS_OPERATION + change.Operation = constants.FileOperationUpdateAttrs var uafc = &allocation.AttributesChange{ ConnectionID: conn.ConnectionID, @@ -874,7 +878,7 @@ func (fsh *StorageHandler) CopyObject(ctx context.Context, r *http.Request) (int if r.Method == "GET" { return nil, common.NewError("invalid_method", "Invalid method used. Use POST instead") } - allocationTx := ctx.Value(constants.ALLOCATION_CONTEXT_KEY).(string) + allocationTx := ctx.Value(constants.ContextKeyAllocation).(string) allocationObj, err := fsh.verifyAllocation(ctx, allocationTx, false) if err != nil { return nil, common.NewError("invalid_parameters", "Invalid allocation id passed."+err.Error()) @@ -889,8 +893,8 @@ func (fsh *StorageHandler) CopyObject(ctx context.Context, r *http.Request) (int return nil, common.NewError("immutable_allocation", "Cannot copy data in an immutable allocation") } - clientID := ctx.Value(constants.CLIENT_CONTEXT_KEY).(string) - _ = ctx.Value(constants.CLIENT_KEY_CONTEXT_KEY).(string) + clientID := ctx.Value(constants.ContextKeyClient).(string) + _ = ctx.Value(constants.ContextKeyClientKey).(string) allocationID := allocationObj.ID @@ -945,7 +949,7 @@ func (fsh *StorageHandler) CopyObject(ctx context.Context, r *http.Request) (int allocationChange := &allocation.AllocationChange{} allocationChange.ConnectionID = connectionObj.ConnectionID allocationChange.Size = objectRef.Size - allocationChange.Operation = allocation.COPY_OPERATION + allocationChange.Operation = constants.FileOperationCopy dfc := &allocation.CopyFileChange{ConnectionID: connectionObj.ConnectionID, AllocationID: connectionObj.AllocationID, DestPath: destPath} dfc.SrcPath = objectRef.Path @@ -974,14 +978,14 @@ func (fsh *StorageHandler) DeleteFile(ctx context.Context, r *http.Request, conn } fileRef, _ := reference.GetReference(ctx, connectionObj.AllocationID, path) - _ = ctx.Value(constants.CLIENT_KEY_CONTEXT_KEY).(string) + _ = ctx.Value(constants.ContextKeyClientKey).(string) if fileRef != nil { deleteSize := fileRef.Size allocationChange := &allocation.AllocationChange{} allocationChange.ConnectionID = connectionObj.ConnectionID allocationChange.Size = 0 - deleteSize - allocationChange.Operation = allocation.DELETE_OPERATION + allocationChange.Operation = constants.FileOperationDelete dfc := &allocation.DeleteFileChange{ConnectionID: connectionObj.ConnectionID, AllocationID: connectionObj.AllocationID, Name: fileRef.Name, Hash: fileRef.Hash, Path: fileRef.Path, Size: deleteSize} @@ -1002,8 +1006,8 @@ func (fsh *StorageHandler) DeleteFile(ctx context.Context, r *http.Request, conn } func (fsh *StorageHandler) CreateDir(ctx context.Context, r *http.Request) (*blobberhttp.UploadResult, error) { - allocationTx := ctx.Value(constants.ALLOCATION_CONTEXT_KEY).(string) - clientID := ctx.Value(constants.CLIENT_CONTEXT_KEY).(string) + allocationTx := ctx.Value(constants.ContextKeyAllocation).(string) + clientID := ctx.Value(constants.ContextKeyClient).(string) allocationObj, err := fsh.verifyAllocation(ctx, allocationTx, false) if err != nil { @@ -1052,7 +1056,7 @@ func (fsh *StorageHandler) CreateDir(ctx context.Context, r *http.Request) (*blo allocationChange := 
&allocation.AllocationChange{} allocationChange.ConnectionID = connectionObj.ConnectionID allocationChange.Size = 0 - allocationChange.Operation = allocation.CREATEDIR_OPERATION + allocationChange.Operation = constants.FileOperationCreateDir connectionObj.Size += allocationChange.Size var formData allocation.NewFileChange formData.Filename = dirPath @@ -1087,11 +1091,11 @@ func (fsh *StorageHandler) CreateDir(ctx context.Context, r *http.Request) (*blo func (fsh *StorageHandler) WriteFile(ctx context.Context, r *http.Request) (*blobberhttp.UploadResult, error) { if r.Method == "GET" { - return nil, common.NewError("invalid_method", "Invalid method used for the upload URL. Use multi-part form POST / PUT / DELETE instead") + return nil, common.NewError("invalid_method", "Invalid method used for the upload URL. Use multi-part form POST / PUT / DELETE / PATCH instead") } - allocationTx := ctx.Value(constants.ALLOCATION_CONTEXT_KEY).(string) - clientID := ctx.Value(constants.CLIENT_CONTEXT_KEY).(string) + allocationTx := ctx.Value(constants.ContextKeyAllocation).(string) + clientID := ctx.Value(constants.ContextKeyClient).(string) allocationObj, err := fsh.verifyAllocation(ctx, allocationTx, false) if err != nil { @@ -1105,7 +1109,7 @@ func (fsh *StorageHandler) WriteFile(ctx context.Context, r *http.Request) (*blo publicKey := allocationObj.OwnerPublicKey if isCollaborator { - publicKey = ctx.Value(constants.CLIENT_KEY_CONTEXT_KEY).(string) + publicKey = ctx.Value(constants.ContextKeyClientKey).(string) } valid, err := verifySignatureFromRequest(allocationTx, r.Header.Get(common.ClientSignatureHeader), publicKey) @@ -1132,6 +1136,14 @@ func (fsh *StorageHandler) WriteFile(ctx context.Context, r *http.Request) (*blo return nil, common.NewError("invalid_parameters", "Invalid connection id passed") } + cmd := createFileCommand(r) + + err = cmd.IsAuthorized(ctx, r, allocationObj, clientID) + + if err != nil { + return nil, err + } + connectionObj, err := allocation.GetAllocationChanges(ctx, connectionID, allocationID, clientID) if err != nil { return nil, common.NewError("meta_error", "Error reading metadata for connection") @@ -1141,151 +1153,52 @@ func (fsh *StorageHandler) WriteFile(ctx context.Context, r *http.Request) (*blo mutex.Lock() defer mutex.Unlock() - result := &blobberhttp.UploadResult{} - - if fileOperation == allocation.DELETE_OPERATION { - if allocationObj.OwnerID != clientID && allocationObj.RepairerID != clientID { - return nil, common.NewError("invalid_operation", "Operation needs to be performed by the owner or the payer of the allocation") - } - result, err = fsh.DeleteFile(ctx, r, connectionObj) - if err != nil { - return nil, err - } - } else if fileOperation == allocation.INSERT_OPERATION || fileOperation == allocation.UPDATE_OPERATION { - formField := getFormFieldName(fileOperation) - var formData allocation.UpdateFileChange - uploadMetaString := r.FormValue(formField) - err = json.Unmarshal([]byte(uploadMetaString), &formData) - if err != nil { - return nil, common.NewError("invalid_parameters", - "Invalid parameters. 
Error parsing the meta data for upload."+err.Error()) - } - existingFileRefSize := int64(0) - existingFileOnCloud := false - if fileOperation == allocation.INSERT_OPERATION { - if allocationObj.OwnerID != clientID && allocationObj.RepairerID != clientID { - return nil, common.NewError("invalid_operation", "Operation needs to be performed by the owner or the payer of the allocation") - } - - if existingFileRef != nil { - return nil, common.NewError("duplicate_file", "File at path already exists") - } - } else if fileOperation == allocation.UPDATE_OPERATION { - if existingFileRef == nil { - return nil, common.NewError("invalid_file_update", "File at path does not exist for update") - } - - if allocationObj.OwnerID != clientID && allocationObj.RepairerID != clientID && !isCollaborator { - return nil, common.NewError("invalid_operation", "Operation needs to be performed by the owner, collaborator or the payer of the allocation") - } - } - - if existingFileRef != nil { - existingFileRefSize = existingFileRef.Size - existingFileOnCloud = existingFileRef.OnCloud - } - - origfile, _, err := r.FormFile("uploadFile") - if err != nil { - return nil, common.NewError("invalid_parameters", "Error Reading multi parts for file."+err.Error()) - } - defer origfile.Close() - - thumbfile, thumbHeader, _ := r.FormFile("uploadThumbnailFile") - thumbnailPresent := thumbHeader != nil - if thumbnailPresent { - defer thumbfile.Close() - } + result, err := cmd.ProcessContent(ctx, r, allocationObj, connectionObj) - fileInputData := &filestore.FileInputData{Name: formData.Filename, Path: formData.Path, OnCloud: existingFileOnCloud} - fileOutputData, err := filestore.GetFileStore().WriteFile(allocationID, fileInputData, origfile, connectionObj.ConnectionID) - if err != nil { - return nil, common.NewError("upload_error", "Failed to upload the file. "+err.Error()) - } - - result.Filename = formData.Filename - result.Hash = fileOutputData.ContentHash - result.MerkleRoot = fileOutputData.MerkleRoot - result.Size = fileOutputData.Size - - if len(formData.Hash) > 0 && formData.Hash != fileOutputData.ContentHash { - return nil, common.NewError("content_hash_mismatch", "Content hash provided in the meta data does not match the file content") - } - if len(formData.MerkleRoot) > 0 && formData.MerkleRoot != fileOutputData.MerkleRoot { - return nil, common.NewError("content_merkle_root_mismatch", "Merkle root provided in the meta data does not match the file content") - } - if fileOutputData.Size > config.Configuration.MaxFileSize { - return nil, common.NewError("file_size_limit_exceeded", "Size for the given file is larger than the max limit") - } - - formData.Hash = fileOutputData.ContentHash - formData.MerkleRoot = fileOutputData.MerkleRoot - formData.AllocationID = allocationID - formData.Size = fileOutputData.Size + if err != nil { + return nil, err + } - allocationSize := fileOutputData.Size - if thumbnailPresent { - thumbInputData := &filestore.FileInputData{Name: thumbHeader.Filename, Path: formData.Path} - thumbOutputData, err := filestore.GetFileStore().WriteFile(allocationID, thumbInputData, thumbfile, connectionObj.ConnectionID) - if err != nil { - return nil, common.NewError("upload_error", "Failed to upload the thumbnail. 
"+err.Error()) - } - if len(formData.ThumbnailHash) > 0 && formData.ThumbnailHash != thumbOutputData.ContentHash { - return nil, common.NewError("content_hash_mismatch", "Content hash provided in the meta data does not match the thumbnail content") - } - formData.ThumbnailHash = thumbOutputData.ContentHash - formData.ThumbnailSize = thumbOutputData.Size - formData.ThumbnailFilename = thumbInputData.Name - } + err = cmd.ProcessThumbnail(ctx, r, allocationObj, connectionObj) - if allocationObj.BlobberSizeUsed+(allocationSize-existingFileRefSize) > allocationObj.BlobberSize { - return nil, common.NewError("max_allocation_size", "Max size reached for the allocation with this blobber") - } + if err != nil { + return nil, err + } - allocationChange := &allocation.AllocationChange{} - allocationChange.ConnectionID = connectionObj.ConnectionID - allocationChange.Size = allocationSize - existingFileRefSize - allocationChange.Operation = fileOperation + err = cmd.UpdateChange(ctx, connectionObj) - connectionObj.Size += allocationChange.Size - if fileOperation == allocation.INSERT_OPERATION { - connectionObj.AddChange(allocationChange, &formData.NewFileChange) - } else if fileOperation == allocation.UPDATE_OPERATION { - connectionObj.AddChange(allocationChange, &formData) - } - } - err = connectionObj.Save(ctx) if err != nil { Logger.Error("Error in writing the connection meta data", zap.Error(err)) - return nil, common.NewError("connection_write_error", "Error writing the connection meta data") + return nil, common.NewError("connection_write_error", err.Error()) //"Error writing the connection meta data") } - return result, nil + return &result, nil } func getFormFieldName(mode string) string { - formField := "uploadMeta" - if mode == allocation.UPDATE_OPERATION { - formField = "updateMeta" - } + return "uploadMeta" + // formField := "uploadMeta" + // if mode == constants.FileOperationUpdate { + // //formField = "updateMeta" + // } - return formField + //return formField } func getFileOperation(r *http.Request) string { - mode := allocation.INSERT_OPERATION + mode := constants.FileOperationInsert if r.Method == "PUT" { - mode = allocation.UPDATE_OPERATION + mode = constants.FileOperationUpdate } else if r.Method == "DELETE" { - mode = allocation.DELETE_OPERATION + mode = constants.FileOperationDelete } return mode } func getExistingFileRef(fsh *StorageHandler, ctx context.Context, r *http.Request, allocationObj *allocation.Allocation, fileOperation string) *reference.Ref { - if fileOperation == allocation.INSERT_OPERATION || fileOperation == allocation.UPDATE_OPERATION { - var formData allocation.UpdateFileChange + if fileOperation == constants.FileOperationInsert || fileOperation == constants.FileOperationUpdate { + var formData allocation.UpdateFileChanger uploadMetaString := r.FormValue(getFormFieldName(fileOperation)) err := json.Unmarshal([]byte(uploadMetaString), &formData) diff --git a/code/go/0chain.net/blobbercore/handler/object_operation_handler_bench_test.go b/code/go/0chain.net/blobbercore/handler/object_operation_handler_bench_test.go new file mode 100644 index 000000000..2020ebaa9 --- /dev/null +++ b/code/go/0chain.net/blobbercore/handler/object_operation_handler_bench_test.go @@ -0,0 +1,198 @@ +package handler + +import ( + "context" + "net/http" + "strconv" + "strings" + "testing" + "time" + + "github.com/0chain/blobber/code/go/0chain.net/blobbercore/datastore" + "github.com/0chain/blobber/code/go/0chain.net/blobbercore/filestore" + 
"github.com/0chain/blobber/code/go/0chain.net/blobbercore/mock" + "github.com/0chain/gosdk/zboxcore/fileref" + "github.com/0chain/gosdk/zboxcore/sdk" +) + +func BenchmarkUploadFileWithDisk(b *testing.B) { + + KB := 1024 + MB := 1024 * KB + //GB := 1024 * MB + + datastore.UseMocket(false) + blobber := mock.NewBlobberClient() + + allocationID := "benchmark_uploadfile" + + allocation := map[string]interface{}{ + "id": allocationID, + "tx": allocationID, + "size": 1024 * 1024 * 100, + "blobber_size": 1024 * 1024 * 1000, + "owner_id": blobber.ClientID, + "owner_public_key": blobber.Wallet.Keys[0].PublicKey, + "expiration_date": time.Now().Add(24 * time.Hour).Unix(), + } + + mock.MockGetAllocationByID(allocationID, allocation) + + formBuilder := sdk.CreateChunkedUploadFormBuilder() + + var storageHandler StorageHandler + + benchmarks := []struct { + Name string + // Size int + ChunkSize int + }{ + {Name: "64K", ChunkSize: 64 * KB}, + {Name: "640K", ChunkSize: 640 * KB}, + {Name: "6M", ChunkSize: 6 * MB}, + {Name: "60M", ChunkSize: 60 * MB}, + } + + for _, bm := range benchmarks { + b.Run(bm.Name, func(b *testing.B) { + + fileName := strings.Replace(bm.Name, " ", "_", -1) + ".txt" + chunkBytes := mock.GenerateRandomBytes(bm.ChunkSize) + fileMeta := &sdk.FileMeta{ + Path: "/tmp/" + fileName, + ActualSize: int64(bm.ChunkSize), + + MimeType: "plain/text", + RemoteName: fileName, + RemotePath: "/" + fileName, + Attributes: fileref.Attributes{}, + } + + hasher := sdk.CreateHasher(bm.ChunkSize) + isFinal := false + + body, formData, _ := formBuilder.Build(fileMeta, hasher, strconv.FormatInt(time.Now().UnixNano(), 10), int64(bm.ChunkSize), 0, isFinal, "", chunkBytes, nil) + + req, err := blobber.NewRequest(http.MethodPost, "http://127.0.0.1:5051/v1/file/upload/benchmark_upload", body) + + if err != nil { + b.Fatal(err) + return + } + + req.Header.Set("Content-Type", formData.ContentType) + err = blobber.SignRequest(req, allocationID) + if err != nil { + b.Fatal(err) + return + } + + b.ResetTimer() + + for i := 0; i < b.N; i++ { + ctx := GetMetaDataStore().CreateTransaction(context.TODO()) + ctx = mock.SetupHandlerContext(ctx, req, allocationID) + _, err := storageHandler.WriteFile(ctx, req) + + if err != nil { + b.Fatal(err) + return + } + + } + }) + } + +} + +func BenchmarkUploadFileWithNoDisk(b *testing.B) { + + KB := 1024 + MB := 1024 * KB + //GB := 1024 * MB + + datastore.UseMocket(false) + filestore.UseMock() + blobber := mock.NewBlobberClient() + + allocationID := "benchmark_uploadfile" + + allocation := map[string]interface{}{ + "id": allocationID, + "tx": allocationID, + "size": 1024 * 1024 * 100, + "blobber_size": 1024 * 1024 * 1000, + "owner_id": blobber.ClientID, + "owner_public_key": blobber.Wallet.Keys[0].PublicKey, + "expiration_date": time.Now().Add(24 * time.Hour).Unix(), + } + + mock.MockGetAllocationByID(allocationID, allocation) + + formBuilder := sdk.CreateChunkedUploadFormBuilder() + + var storageHandler StorageHandler + + benchmarks := []struct { + Name string + // Size int + ChunkSize int + }{ + {Name: "64K", ChunkSize: 64 * KB}, + {Name: "640K", ChunkSize: 640 * KB}, + {Name: "6M", ChunkSize: 6 * MB}, + {Name: "60M", ChunkSize: 60 * MB}, + } + + for _, bm := range benchmarks { + b.Run(bm.Name, func(b *testing.B) { + + fileName := strings.Replace(bm.Name, " ", "_", -1) + ".txt" + chunkBytes := mock.GenerateRandomBytes(bm.ChunkSize) + fileMeta := &sdk.FileMeta{ + Path: "/tmp/" + fileName, + ActualSize: int64(bm.ChunkSize), + + MimeType: "plain/text", + RemoteName: fileName, + 
RemotePath: "/" + fileName, + Attributes: fileref.Attributes{}, + } + + hasher := sdk.CreateHasher(bm.ChunkSize) + isFinal := false + + body, formData, _ := formBuilder.Build(fileMeta, hasher, strconv.FormatInt(time.Now().UnixNano(), 10), int64(bm.ChunkSize), 0, isFinal, "", chunkBytes, nil) + + req, err := blobber.NewRequest(http.MethodPost, "http://127.0.0.1:5051/v1/file/upload/benchmark_upload", body) + + if err != nil { + b.Fatal(err) + return + } + + req.Header.Set("Content-Type", formData.ContentType) + err = blobber.SignRequest(req, allocationID) + if err != nil { + b.Fatal(err) + return + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + + ctx := GetMetaDataStore().CreateTransaction(context.TODO()) + + ctx = mock.SetupHandlerContext(ctx, req, allocationID) + _, err := storageHandler.WriteFile(ctx, req) + + if err != nil { + b.Fatal(err) + return + } + + } + }) + } + +} diff --git a/code/go/0chain.net/blobbercore/handler/object_operation_handler_test.go b/code/go/0chain.net/blobbercore/handler/object_operation_handler_test.go index 7a861f89c..66c910ded 100644 --- a/code/go/0chain.net/blobbercore/handler/object_operation_handler_test.go +++ b/code/go/0chain.net/blobbercore/handler/object_operation_handler_test.go @@ -31,8 +31,8 @@ import ( "github.com/0chain/blobber/code/go/0chain.net/blobbercore/allocation" - "github.com/0chain/blobber/code/go/0chain.net/blobbercore/constants" "github.com/0chain/blobber/code/go/0chain.net/blobbercore/datastore" + "github.com/0chain/gosdk/constants" "github.com/stretchr/testify/require" "testing" @@ -40,19 +40,19 @@ import ( func TestDownloadFile(t *testing.T) { const ( - mocketLogging = false - mockBlobberId = "mock_blobber_id" - mockAllocationId = "mock_allocation_id" - mockAllocationTx = "mock_allocation_Tx" + mocketLogging = false + mockBlobberId = "mock_blobber_id" + mockAllocationId = "mock_allocation_id" + mockAllocationTx = "mock_allocation_Tx" mockRemoteFilePath = "mock/remote/file/path" mockBlockNumber = 1 mockEncryptKey = "mock encrypt key" - mockClientWallet = "{\"client_id\":\"9a566aa4f8e8c342fed97c8928040a21f21b8f574e5782c28568635ba9c75a85\",\"client_key\":\"40cd10039913ceabacf05a7c60e1ad69bb2964987bc50f77495e514dc451f907c3d8ebcdab20eedde9c8f39b9a1d66609a637352f318552fb69d4b3672516d1a\",\"keys\":[{\"public_key\":\"40cd10039913ceabacf05a7c60e1ad69bb2964987bc50f77495e514dc451f907c3d8ebcdab20eedde9c8f39b9a1d66609a637352f318552fb69d4b3672516d1a\",\"private_key\":\"a3a88aad5d89cec28c6e37c2925560ce160ac14d2cdcf4a4654b2bb358fe7514\"}],\"mnemonics\":\"inside february piece turkey offer merry select combine tissue wave wet shift room afraid december gown mean brick speak grant gain become toy clown\",\"version\":\"1.0\",\"date_created\":\"2021-05-21 17:32:29.484657 +0545 +0545 m=+0.072791323\"}" - mockOwnerWallet = "{\"client_id\":\"5d0229e0141071c1f88785b1faba4b612582f9d446b02e8d893f1e0d0ce92cdc\",\"client_key\":\"aefef5778906680360cf55bf462823367161520ad95ca183445a879a59c9bf0470b74e41fc12f2ee0ce9c19c4e77878d734226918672d089f561ecf1d5435720\",\"keys\":[{\"public_key\":\"aefef5778906680360cf55bf462823367161520ad95ca183445a879a59c9bf0470b74e41fc12f2ee0ce9c19c4e77878d734226918672d089f561ecf1d5435720\",\"private_key\":\"4f8af6fb1098a3817d705aef96db933f31755674b00a5d38bb2439c0a27b0117\"}],\"mnemonics\":\"erode transfer noble civil ridge cloth sentence gauge board wheel sight caution okay sand ranch ice frozen frown grape lion feed fox game zone\",\"version\":\"1.0\",\"date_created\":\"2021-09-04T14:11:06+01:00\"}" - mockReadPrice = int64(0.1 * 
1e10) - mockWritePrice = int64(0.5 * 1e10) - mockBigBalance = int64(10000 * 1e10) - mockPoolId = "mock pool id" + mockClientWallet = "{\"client_id\":\"9a566aa4f8e8c342fed97c8928040a21f21b8f574e5782c28568635ba9c75a85\",\"client_key\":\"40cd10039913ceabacf05a7c60e1ad69bb2964987bc50f77495e514dc451f907c3d8ebcdab20eedde9c8f39b9a1d66609a637352f318552fb69d4b3672516d1a\",\"keys\":[{\"public_key\":\"40cd10039913ceabacf05a7c60e1ad69bb2964987bc50f77495e514dc451f907c3d8ebcdab20eedde9c8f39b9a1d66609a637352f318552fb69d4b3672516d1a\",\"private_key\":\"a3a88aad5d89cec28c6e37c2925560ce160ac14d2cdcf4a4654b2bb358fe7514\"}],\"mnemonics\":\"inside february piece turkey offer merry select combine tissue wave wet shift room afraid december gown mean brick speak grant gain become toy clown\",\"version\":\"1.0\",\"date_created\":\"2021-05-21 17:32:29.484657 +0545 +0545 m=+0.072791323\"}" + mockOwnerWallet = "{\"client_id\":\"5d0229e0141071c1f88785b1faba4b612582f9d446b02e8d893f1e0d0ce92cdc\",\"client_key\":\"aefef5778906680360cf55bf462823367161520ad95ca183445a879a59c9bf0470b74e41fc12f2ee0ce9c19c4e77878d734226918672d089f561ecf1d5435720\",\"keys\":[{\"public_key\":\"aefef5778906680360cf55bf462823367161520ad95ca183445a879a59c9bf0470b74e41fc12f2ee0ce9c19c4e77878d734226918672d089f561ecf1d5435720\",\"private_key\":\"4f8af6fb1098a3817d705aef96db933f31755674b00a5d38bb2439c0a27b0117\"}],\"mnemonics\":\"erode transfer noble civil ridge cloth sentence gauge board wheel sight caution okay sand ranch ice frozen frown grape lion feed fox game zone\",\"version\":\"1.0\",\"date_created\":\"2021-09-04T14:11:06+01:00\"}" + mockReadPrice = int64(0.1 * 1e10) + mockWritePrice = int64(0.5 * 1e10) + mockBigBalance = int64(10000 * 1e10) + mockPoolId = "mock pool id" ) ts := time.Now().Add(time.Hour) var mockLongTimeInFuture = common.Timestamp(ts.Unix()) + common.Timestamp(time.Second*1000) @@ -405,12 +405,12 @@ func TestDownloadFile(t *testing.T) { setupCtx := func(p parameters) context.Context { ctx := context.TODO() - ctx = context.WithValue(ctx, constants.CLIENT_CONTEXT_KEY, client.GetClientID()) - ctx = context.WithValue(ctx, constants.ALLOCATION_CONTEXT_KEY, p.inData.allocationTx) - ctx = context.WithValue(ctx, constants.CLIENT_KEY_CONTEXT_KEY, client.GetClientPublicKey()) + ctx = context.WithValue(ctx, constants.ContextKeyClient, client.GetClientID()) + ctx = context.WithValue(ctx, constants.ContextKeyAllocation, p.inData.allocationTx) + ctx = context.WithValue(ctx, constants.ContextKeyClientKey, client.GetClientPublicKey()) db := datastore.GetStore().GetDB().Begin() - ctx = context.WithValue(ctx, datastore.CONNECTION_CONTEXT_KEY, db) + ctx = context.WithValue(ctx, datastore.ContextKeyTransaction, db) return ctx } diff --git a/code/go/0chain.net/blobbercore/handler/storage_handler.go b/code/go/0chain.net/blobbercore/handler/storage_handler.go index 1b682fd3f..bed0323ea 100644 --- a/code/go/0chain.net/blobbercore/handler/storage_handler.go +++ b/code/go/0chain.net/blobbercore/handler/storage_handler.go @@ -10,7 +10,6 @@ import ( "github.com/0chain/blobber/code/go/0chain.net/blobbercore/blobberhttp" "github.com/0chain/blobber/code/go/0chain.net/blobbercore/allocation" - "github.com/0chain/blobber/code/go/0chain.net/blobbercore/constants" "github.com/0chain/blobber/code/go/0chain.net/blobbercore/readmarker" "github.com/0chain/blobber/code/go/0chain.net/blobbercore/reference" "github.com/0chain/blobber/code/go/0chain.net/blobbercore/stats" @@ -18,19 +17,21 @@ import ( "github.com/0chain/blobber/code/go/0chain.net/core/common" 
"github.com/0chain/blobber/code/go/0chain.net/core/encryption" . "github.com/0chain/blobber/code/go/0chain.net/core/logging" + "github.com/0chain/gosdk/constants" "go.uber.org/zap" ) const ( FormFileParseMaxMemory = 10 * 1024 * 1024 OffsetDateLayout = "2006-01-02T15:04:05.99999Z07:00" - DownloadCcontentFull = "full" + DownloadContentFull = "full" DownloadContentThumb = "thumbnail" - PageLimit = 100 //100 rows will make upto 100 KB + PageLimit = 100 //100 rows will make up to 100 KB ) type StorageHandler struct{} +// verifyAllocation try to get allocation from postgres.if it doesn't exists, get it from sharders, and insert it into postgres. func (fsh *StorageHandler) verifyAllocation(ctx context.Context, tx string, readonly bool) (alloc *allocation.Allocation, err error) { @@ -121,7 +122,7 @@ func (fsh *StorageHandler) GetFileMeta(ctx context.Context, r *http.Request) (in if r.Method == "GET" { return nil, common.NewError("invalid_method", "Invalid method used. Use POST instead") } - allocationTx := ctx.Value(constants.ALLOCATION_CONTEXT_KEY).(string) + allocationTx := ctx.Value(constants.ContextKeyAllocation).(string) alloc, err := fsh.verifyAllocation(ctx, allocationTx, true) if err != nil { @@ -129,12 +130,12 @@ func (fsh *StorageHandler) GetFileMeta(ctx context.Context, r *http.Request) (in } allocationID := alloc.ID - clientID := ctx.Value(constants.CLIENT_CONTEXT_KEY).(string) + clientID := ctx.Value(constants.ContextKeyClient).(string) if len(clientID) == 0 { return nil, common.NewError("invalid_operation", "Operation needs to be performed by the owner of the allocation") } - _ = ctx.Value(constants.CLIENT_KEY_CONTEXT_KEY).(string) + _ = ctx.Value(constants.ContextKeyClientKey).(string) pathHash, err := pathHashFromReq(r, allocationID) if err != nil { @@ -191,7 +192,7 @@ func (fsh *StorageHandler) AddCommitMetaTxn(ctx context.Context, r *http.Request if r.Method == "GET" { return nil, common.NewError("invalid_method", "Invalid method used. 
Use POST instead") } - allocationTx := ctx.Value(constants.ALLOCATION_CONTEXT_KEY).(string) + allocationTx := ctx.Value(constants.ContextKeyAllocation).(string) allocationObj, err := fsh.verifyAllocation(ctx, allocationTx, true) if err != nil { @@ -199,12 +200,12 @@ func (fsh *StorageHandler) AddCommitMetaTxn(ctx context.Context, r *http.Request } allocationID := allocationObj.ID - clientID := ctx.Value(constants.CLIENT_CONTEXT_KEY).(string) + clientID := ctx.Value(constants.ContextKeyClient).(string) if len(clientID) == 0 { return nil, common.NewError("invalid_operation", "Operation needs to be performed by the owner of the allocation") } - _ = ctx.Value(constants.CLIENT_KEY_CONTEXT_KEY).(string) + _ = ctx.Value(constants.ContextKeyClientKey).(string) pathHash, err := pathHashFromReq(r, allocationID) if err != nil { @@ -252,21 +253,21 @@ func (fsh *StorageHandler) AddCommitMetaTxn(ctx context.Context, r *http.Request } func (fsh *StorageHandler) AddCollaborator(ctx context.Context, r *http.Request) (interface{}, error) { - allocationTx := ctx.Value(constants.ALLOCATION_CONTEXT_KEY).(string) + allocationTx := ctx.Value(constants.ContextKeyAllocation).(string) allocationObj, err := fsh.verifyAllocation(ctx, allocationTx, true) if err != nil { return nil, common.NewError("invalid_parameters", "Invalid allocation id passed."+err.Error()) } - clientSign, _ := ctx.Value(constants.CLIENT_SIGNATURE_HEADER_KEY).(string) + clientSign, _ := ctx.Value(constants.ContextKeyClientSignatureHeaderKey).(string) valid, err := verifySignatureFromRequest(allocationTx, clientSign, allocationObj.OwnerPublicKey) if !valid || err != nil { return nil, common.NewError("invalid_signature", "Invalid signature") } allocationID := allocationObj.ID - clientID := ctx.Value(constants.CLIENT_CONTEXT_KEY).(string) - _ = ctx.Value(constants.CLIENT_KEY_CONTEXT_KEY).(string) + clientID := ctx.Value(constants.ContextKeyClient).(string) + _ = ctx.Value(constants.ContextKeyClientKey).(string) pathHash, err := pathHashFromReq(r, allocationID) if err != nil { @@ -338,25 +339,25 @@ func (fsh *StorageHandler) GetFileStats(ctx context.Context, r *http.Request) (i if r.Method == "GET" { return nil, common.NewError("invalid_method", "Invalid method used. 
Use POST instead") } - allocationTx := ctx.Value(constants.ALLOCATION_CONTEXT_KEY).(string) + allocationTx := ctx.Value(constants.ContextKeyAllocation).(string) allocationObj, err := fsh.verifyAllocation(ctx, allocationTx, true) if err != nil { return nil, common.NewError("invalid_parameters", "Invalid allocation id passed."+err.Error()) } allocationID := allocationObj.ID - clientSign, _ := ctx.Value(constants.CLIENT_SIGNATURE_HEADER_KEY).(string) + clientSign, _ := ctx.Value(constants.ContextKeyClientSignatureHeaderKey).(string) valid, err := verifySignatureFromRequest(allocationTx, clientSign, allocationObj.OwnerPublicKey) if !valid || err != nil { return nil, common.NewError("invalid_signature", "Invalid signature") } - clientID := ctx.Value(constants.CLIENT_CONTEXT_KEY).(string) + clientID := ctx.Value(constants.ContextKeyClient).(string) if len(clientID) == 0 || allocationObj.OwnerID != clientID { return nil, common.NewError("invalid_operation", "Operation needs to be performed by the owner of the allocation") } - _ = ctx.Value(constants.CLIENT_KEY_CONTEXT_KEY).(string) + _ = ctx.Value(constants.ContextKeyClientKey).(string) pathHash, err := pathHashFromReq(r, allocationID) if err != nil { @@ -395,8 +396,8 @@ func (fsh *StorageHandler) ListEntities(ctx context.Context, r *http.Request) (* if r.Method == "POST" { return nil, common.NewError("invalid_method", "Invalid method used. Use GET instead") } - clientID := ctx.Value(constants.CLIENT_CONTEXT_KEY).(string) - allocationTx := ctx.Value(constants.ALLOCATION_CONTEXT_KEY).(string) + clientID := ctx.Value(constants.ContextKeyClient).(string) + allocationTx := ctx.Value(constants.ContextKeyAllocation).(string) allocationObj, err := fsh.verifyAllocation(ctx, allocationTx, true) if err != nil { @@ -475,7 +476,7 @@ func (fsh *StorageHandler) getReferencePath(ctx context.Context, r *http.Request return } - allocationTx := ctx.Value(constants.ALLOCATION_CONTEXT_KEY).(string) + allocationTx := ctx.Value(constants.ContextKeyAllocation).(string) allocationObj, err := fsh.verifyAllocation(ctx, allocationTx, false) if err != nil { errCh <- common.NewError("invalid_parameters", "Invalid allocation id passed."+err.Error()) @@ -483,14 +484,14 @@ func (fsh *StorageHandler) getReferencePath(ctx context.Context, r *http.Request } allocationID := allocationObj.ID - clientSign, _ := ctx.Value(constants.CLIENT_SIGNATURE_HEADER_KEY).(string) + clientSign, _ := ctx.Value(constants.ContextKeyClientSignatureHeaderKey).(string) valid, err := verifySignatureFromRequest(allocationTx, clientSign, allocationObj.OwnerPublicKey) if !valid || err != nil { errCh <- common.NewError("invalid_signature", "Invalid signature") return } - clientID := ctx.Value(constants.CLIENT_CONTEXT_KEY).(string) + clientID := ctx.Value(constants.ContextKeyClient).(string) if len(clientID) == 0 { errCh <- common.NewError("invalid_operation", "Please pass clientID in the header") return @@ -509,8 +510,10 @@ func (fsh *StorageHandler) getReferencePath(ctx context.Context, r *http.Request } refPath := &reference.ReferencePath{Ref: rootRef} - refsToProcess := make([]*reference.ReferencePath, 0) - refsToProcess = append(refsToProcess, refPath) + + refsToProcess := []*reference.ReferencePath{refPath} + + //convert Ref tree to ReferencePath tree for len(refsToProcess) > 0 { refToProcess := refsToProcess[0] refToProcess.Meta = refToProcess.Ref.GetListingData(ctx) @@ -548,20 +551,20 @@ func (fsh *StorageHandler) GetObjectPath(ctx context.Context, r *http.Request) ( if r.Method == "POST" { return 
nil, common.NewError("invalid_method", "Invalid method used. Use GET instead") } - allocationTx := ctx.Value(constants.ALLOCATION_CONTEXT_KEY).(string) + allocationTx := ctx.Value(constants.ContextKeyAllocation).(string) allocationObj, err := fsh.verifyAllocation(ctx, allocationTx, false) if err != nil { return nil, common.NewError("invalid_parameters", "Invalid allocation id passed."+err.Error()) } allocationID := allocationObj.ID - clientSign, _ := ctx.Value(constants.CLIENT_SIGNATURE_HEADER_KEY).(string) + clientSign, _ := ctx.Value(constants.ContextKeyClientSignatureHeaderKey).(string) valid, err := verifySignatureFromRequest(allocationTx, clientSign, allocationObj.OwnerPublicKey) if !valid || err != nil { return nil, common.NewError("invalid_signature", "Invalid signature") } - clientID := ctx.Value(constants.CLIENT_CONTEXT_KEY).(string) + clientID := ctx.Value(constants.ContextKeyClient).(string) if len(clientID) == 0 || allocationObj.OwnerID != clientID { return nil, common.NewError("invalid_operation", "Operation needs to be performed by the owner of the allocation") } @@ -606,7 +609,7 @@ func (fsh *StorageHandler) GetObjectTree(ctx context.Context, r *http.Request) ( if r.Method == "POST" { return nil, common.NewError("invalid_method", "Invalid method used. Use GET instead") } - allocationTx := ctx.Value(constants.ALLOCATION_CONTEXT_KEY).(string) + allocationTx := ctx.Value(constants.ContextKeyAllocation).(string) allocationObj, err := fsh.verifyAllocation(ctx, allocationTx, false) if err != nil { @@ -614,13 +617,13 @@ func (fsh *StorageHandler) GetObjectTree(ctx context.Context, r *http.Request) ( } allocationID := allocationObj.ID - clientSign, _ := ctx.Value(constants.CLIENT_SIGNATURE_HEADER_KEY).(string) + clientSign, _ := ctx.Value(constants.ContextKeyClientSignatureHeaderKey).(string) valid, err := verifySignatureFromRequest(allocationTx, clientSign, allocationObj.OwnerPublicKey) if !valid || err != nil { return nil, common.NewError("invalid_signature", "Invalid signature") } - clientID := ctx.Value(constants.CLIENT_CONTEXT_KEY).(string) + clientID := ctx.Value(constants.ContextKeyClient).(string) if len(clientID) == 0 || allocationObj.OwnerID != clientID { return nil, common.NewError("invalid_operation", "Operation needs to be performed by the owner of the allocation") } @@ -673,21 +676,21 @@ func (fsh *StorageHandler) GetObjectTree(ctx context.Context, r *http.Request) ( //Updated gives rows that is updated compared to the date given. And deleted gives deleted refs compared to the date given. 
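//
// Example (sketch): a caller formats the cutoff time with the OffsetDateLayout
// constant declared above before sending it, e.g.
//
//	cutoff := time.Now().Add(-24 * time.Hour).UTC().Format(OffsetDateLayout)
//
// which yields an RFC 3339-style value with fractional seconds and a numeric
// zone offset, the format GetRefs expects for its updated/deleted filters.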
//Updated date time format should be as declared in above constant; OffsetDateLayout func (fsh *StorageHandler) GetRefs(ctx context.Context, r *http.Request) (*blobberhttp.RefResult, error) { - allocationTx := ctx.Value(constants.ALLOCATION_CONTEXT_KEY).(string) + allocationTx := ctx.Value(constants.ContextKeyAllocation).(string) allocationObj, err := fsh.verifyAllocation(ctx, allocationTx, false) if err != nil { return nil, common.NewError("invalid_parameters", "Invalid allocation id passed."+err.Error()) } - clientSign, _ := ctx.Value(constants.CLIENT_SIGNATURE_HEADER_KEY).(string) + clientSign, _ := ctx.Value(constants.ContextKeyClientSignatureHeaderKey).(string) valid, err := verifySignatureFromRequest(allocationTx, clientSign, allocationObj.OwnerPublicKey) if !valid || err != nil { return nil, common.NewError("invalid_signature", "Invalid signature") } allocationID := allocationObj.ID - clientID := ctx.Value(constants.CLIENT_CONTEXT_KEY).(string) + clientID := ctx.Value(constants.ContextKeyClient).(string) if len(clientID) == 0 || allocationObj.OwnerID != clientID { return nil, common.NewError("invalid_operation", "Operation needs to be performed by the owner of the allocation") } @@ -786,7 +789,7 @@ func (fsh *StorageHandler) CalculateHash(ctx context.Context, r *http.Request) ( if r.Method != "POST" { return nil, common.NewError("invalid_method", "Invalid method used. Use POST instead") } - allocationTx := ctx.Value(constants.ALLOCATION_CONTEXT_KEY).(string) + allocationTx := ctx.Value(constants.ContextKeyAllocation).(string) allocationObj, err := fsh.verifyAllocation(ctx, allocationTx, false) if err != nil { @@ -794,7 +797,7 @@ func (fsh *StorageHandler) CalculateHash(ctx context.Context, r *http.Request) ( } allocationID := allocationObj.ID - clientID := ctx.Value(constants.CLIENT_CONTEXT_KEY).(string) + clientID := ctx.Value(constants.ContextKeyClient).(string) if len(clientID) == 0 || allocationObj.OwnerID != clientID { return nil, common.NewError("invalid_operation", "Operation needs to be performed by the owner of the allocation") } diff --git a/code/go/0chain.net/blobbercore/handler/storage_handler_bench_test.go b/code/go/0chain.net/blobbercore/handler/storage_handler_bench_test.go new file mode 100644 index 000000000..abeebd162 --- /dev/null +++ b/code/go/0chain.net/blobbercore/handler/storage_handler_bench_test.go @@ -0,0 +1 @@ +package handler diff --git a/code/go/0chain.net/blobbercore/handler/upload_integration_test.go b/code/go/0chain.net/blobbercore/handler/upload_integration_test.go index 08f7f4a09..b06d7c048 100644 --- a/code/go/0chain.net/blobbercore/handler/upload_integration_test.go +++ b/code/go/0chain.net/blobbercore/handler/upload_integration_test.go @@ -4,11 +4,12 @@ import ( "context" "encoding/hex" "encoding/json" - blobbergrpc "github.com/0chain/blobber/code/go/0chain.net/blobbercore/blobbergrpc/proto" "io" "os" "testing" + blobbergrpc "github.com/0chain/blobber/code/go/0chain.net/blobbercore/blobbergrpc/proto" + "github.com/0chain/blobber/code/go/0chain.net/blobbercore/allocation" "github.com/0chain/blobber/code/go/0chain.net/core/common" "github.com/0chain/blobber/code/go/0chain.net/core/encryption" @@ -24,7 +25,7 @@ func TestBlobberGRPCService_UploadFile(t *testing.T) { pubKeyBytes, _ := hex.DecodeString(pubKey) clientId := encryption.Hash(pubKeyBytes) - formFieldByt, err := json.Marshal(&allocation.UpdateFileChange{NewFileChange: allocation.NewFileChange{Filename: `helper_integration_test.go`}}) + formFieldByt, err := 
json.Marshal(&allocation.UpdateFileChanger{BaseFileChanger: allocation.BaseFileChanger{Filename: `helper_integration_test.go`}}) if err != nil { t.Fatal(err) } diff --git a/code/go/0chain.net/blobbercore/handler/worker.go b/code/go/0chain.net/blobbercore/handler/worker.go index 4a086c402..3aa705f1d 100644 --- a/code/go/0chain.net/blobbercore/handler/worker.go +++ b/code/go/0chain.net/blobbercore/handler/worker.go @@ -20,9 +20,9 @@ import ( ) func SetupWorkers(ctx context.Context) { - go CleanupTempFiles(ctx) + go startCleanupTempFiles(ctx) if config.Configuration.MinioStart { - go MoveColdDataToCloud(ctx) + go startMoveColdDataToCloud(ctx) } } @@ -52,7 +52,40 @@ func CleanupDiskFiles(ctx context.Context) error { return nil } -func CleanupTempFiles(ctx context.Context) { +func cleanupTempFiles(ctx context.Context) { + defer func() { + if r := recover(); r != nil { + Logger.Error("[recover] cleanupTempFiles", zap.Any("err", r)) + } + }() + + rctx := datastore.GetStore().CreateTransaction(ctx) + db := datastore.GetStore().GetTransaction(rctx) + now := time.Now().UTC() + then := now.Add(time.Duration(-config.Configuration.OpenConnectionWorkerTolerance) * time.Second) + + var openConnectionsToDelete []allocation.AllocationChangeCollector + db.Table((&allocation.AllocationChangeCollector{}).TableName()).Where("updated_at < ? AND status IN (?,?)", then, allocation.NewConnection, allocation.InProgressConnection).Preload("Changes").Find(&openConnectionsToDelete) + for _, connection := range openConnectionsToDelete { + + Logger.Info("Deleting temp files for the connection", zap.Any("connection", connection.ConnectionID)) + connection.ComputeProperties() + nctx := datastore.GetStore().CreateTransaction(ctx) + ndb := datastore.GetStore().GetTransaction(nctx) + for _, changeProcessor := range connection.AllocationChanges { + if err := changeProcessor.DeleteTempFile(); err != nil { + Logger.Error("AllocationChangeProcessor_DeleteTempFile", zap.Error(err)) + } + } + ndb.Model(connection).Updates(allocation.AllocationChangeCollector{Status: allocation.DeletedConnection}) + ndb.Commit() + nctx.Done() + } + db.Rollback() + rctx.Done() +} + +func startCleanupTempFiles(ctx context.Context) { var iterInprogress = false ticker := time.NewTicker(time.Duration(config.Configuration.OpenConnectionWorkerFreq) * time.Second) for { @@ -63,35 +96,71 @@ func CleanupTempFiles(ctx context.Context) { //Logger.Info("Trying to redeem writemarkers.", zap.Any("iterInprogress", iterInprogress), zap.Any("numOfWorkers", numOfWorkers)) if !iterInprogress { iterInprogress = true //nolint:ineffassign // probably has something to do with goroutines - rctx := datastore.GetStore().CreateTransaction(ctx) - db := datastore.GetStore().GetTransaction(rctx) - now := time.Now() - then := now.Add(time.Duration(-config.Configuration.OpenConnectionWorkerTolerance) * time.Second) - var openConnectionsToDelete []allocation.AllocationChangeCollector - db.Table((&allocation.AllocationChangeCollector{}).TableName()).Where("updated_at < ? 
AND status IN (?,?)", then, allocation.NewConnection, allocation.InProgressConnection).Preload("Changes").Find(&openConnectionsToDelete) - for _, connection := range openConnectionsToDelete { - Logger.Info("Deleting temp files for the connection", zap.Any("connection", connection.ConnectionID)) - connection.ComputeProperties() - nctx := datastore.GetStore().CreateTransaction(ctx) - ndb := datastore.GetStore().GetTransaction(nctx) - for _, changeProcessor := range connection.AllocationChanges { - if err := changeProcessor.DeleteTempFile(); err != nil { - Logger.Error("AllocationChangeProcessor_DeleteTempFile", zap.Error(err)) - } - } - ndb.Model(connection).Updates(allocation.AllocationChangeCollector{Status: allocation.DeletedConnection}) - ndb.Commit() - nctx.Done() - } - db.Rollback() - rctx.Done() + cleanupTempFiles(ctx) iterInprogress = false } } } } -func MoveColdDataToCloud(ctx context.Context) { +func moveColdDataToCloud(ctx context.Context, coldStorageMinFileSize int64, limit int64) { + defer func() { + if r := recover(); r != nil { + Logger.Error("[recover] moveColdDataToCloud", zap.Any("err", r)) + } + }() + + fs := filestore.GetFileStore() + totalDiskSizeUsed, err := fs.GetTotalDiskSizeUsed() + if err != nil { + Logger.Error("Unable to get total disk size used from the file store", zap.Error(err)) + return + } + + // Check if capacity exceded the start capacity size + if totalDiskSizeUsed > config.Configuration.ColdStorageStartCapacitySize { + rctx := datastore.GetStore().CreateTransaction(ctx) + db := datastore.GetStore().GetTransaction(rctx) + // Get total number of fileRefs with size greater than limit and on_cloud = false + var totalRecords int64 + db.Table((&reference.Ref{}).TableName()). + Where("size > ? AND on_cloud = ?", coldStorageMinFileSize, false). + Count(&totalRecords) + + offset := int64(0) + for offset < totalRecords { + // Get all fileRefs with size greater than limit and on_cloud false + var fileRefs []*reference.Ref + db.Offset(int(offset)).Limit(int(limit)). + Table((&reference.Ref{}).TableName()). + Where("size > ? AND on_cloud = ?", coldStorageMinFileSize, false). 
+ Find(&fileRefs) + + for _, fileRef := range fileRefs { + if fileRef.Type == reference.DIRECTORY { + continue + } + + fileStat, err := stats.GetFileStats(rctx, fileRef.ID) + if err != nil { + Logger.Error("Unable to find filestats for fileRef with", zap.Any("reID", fileRef.ID)) + continue + } + + timeToAdd := time.Duration(config.Configuration.ColdStorageTimeLimitInHours) * time.Hour + if fileStat.UpdatedAt.Before(time.Now().Add(-1 * timeToAdd)) { + Logger.Info("Moving file to cloud", zap.Any("path", fileRef.Path), zap.Any("allocation", fileRef.AllocationID)) + moveFileToCloud(ctx, fileRef) + } + } + offset = offset + limit + } + db.Commit() + rctx.Done() + } +} + +func startMoveColdDataToCloud(ctx context.Context) { var iterInprogress = false var coldStorageMinFileSize = config.Configuration.ColdStorageMinimumFileSize var limit = config.Configuration.ColdStorageJobQueryLimit @@ -102,54 +171,8 @@ func MoveColdDataToCloud(ctx context.Context) { return case <-ticker.C: if !iterInprogress { - fs := filestore.GetFileStore() - totalDiskSizeUsed, err := fs.GetTotalDiskSizeUsed() - if err != nil { - Logger.Error("Unable to get total disk size used from the file store", zap.Error(err)) - return - } - // Check if capacity exceded the start capacity size - if totalDiskSizeUsed > config.Configuration.ColdStorageStartCapacitySize { - rctx := datastore.GetStore().CreateTransaction(ctx) - db := datastore.GetStore().GetTransaction(rctx) - // Get total number of fileRefs with size greater than limit and on_cloud = false - var totalRecords int64 - db.Table((&reference.Ref{}).TableName()). - Where("size > ? AND on_cloud = ?", coldStorageMinFileSize, false). - Count(&totalRecords) - - offset := int64(0) - for offset < totalRecords { - // Get all fileRefs with size greater than limit and on_cloud false - var fileRefs []*reference.Ref - db.Offset(int(offset)).Limit(int(limit)). - Table((&reference.Ref{}).TableName()). - Where("size > ? AND on_cloud = ?", coldStorageMinFileSize, false). - Find(&fileRefs) - - for _, fileRef := range fileRefs { - if fileRef.Type == reference.DIRECTORY { - continue - } - - fileStat, err := stats.GetFileStats(rctx, fileRef.ID) - if err != nil { - Logger.Error("Unable to find filestats for fileRef with", zap.Any("reID", fileRef.ID)) - continue - } - - timeToAdd := time.Duration(config.Configuration.ColdStorageTimeLimitInHours) * time.Hour - if fileStat.UpdatedAt.Before(time.Now().Add(-1 * timeToAdd)) { - Logger.Info("Moving file to cloud", zap.Any("path", fileRef.Path), zap.Any("allocation", fileRef.AllocationID)) - moveFileToCloud(ctx, fileRef) - } - } - offset = offset + limit - } - db.Commit() - rctx.Done() - } + moveColdDataToCloud(ctx, coldStorageMinFileSize, limit) iterInprogress = false stats.LastMinioScan = time.Now() Logger.Info("Move cold data to cloud worker running successfully") diff --git a/code/go/0chain.net/blobbercore/mock/allocation.go b/code/go/0chain.net/blobbercore/mock/allocation.go new file mode 100644 index 000000000..5d1896cf4 --- /dev/null +++ b/code/go/0chain.net/blobbercore/mock/allocation.go @@ -0,0 +1,12 @@ +package mock + +import ( + "github.com/selvatico/go-mocket" +) + +func MockGetAllocationByID(allocationID string, allocation map[string]interface{}) { + gomocket.Catcher.NewMock(). + WithQuery(`SELECT * FROM "allocations" WHERE "allocations"."tx" = $1 ORDER BY "allocations"."id" LIMIT 1`). + WithArgs(allocationID). 
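
The worker.go refactor above splits each long-running worker into a `start*` function that owns the ticker loop and a per-tick body guarded by a deferred `recover`, so a panic in one iteration cannot kill the goroutine. A minimal, self-contained sketch of that pattern (hypothetical names, not the blobber's actual worker):

```go
package main

import (
	"context"
	"log"
	"time"
)

// runOnce is the per-tick body; the deferred recover keeps a panic in one
// iteration from tearing down the whole worker goroutine.
func runOnce(ctx context.Context) {
	defer func() {
		if r := recover(); r != nil {
			log.Printf("[recover] runOnce: %v", r)
		}
	}()
	// ... do one unit of work (e.g. clean up stale open connections) ...
}

// startWorker owns the ticker loop and exits when ctx is cancelled.
func startWorker(ctx context.Context, freq time.Duration) {
	ticker := time.NewTicker(freq)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			runOnce(ctx)
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()
	go startWorker(ctx, time.Second)
	<-ctx.Done()
}
```
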
+ WithReply([]map[string]interface{}{allocation}) +} diff --git a/code/go/0chain.net/blobbercore/mock/buf.go b/code/go/0chain.net/blobbercore/mock/buf.go new file mode 100644 index 000000000..b9f161855 --- /dev/null +++ b/code/go/0chain.net/blobbercore/mock/buf.go @@ -0,0 +1,14 @@ +package mock + +import "crypto/rand" + +func GenerateRandomBytes(n int) []byte { + b := make([]byte, n) + _, err := rand.Read(b) + // Note that err == nil only if we read len(b) bytes. + if err != nil { + return nil + } + + return b +} diff --git a/code/go/0chain.net/blobbercore/mock/ctx.go b/code/go/0chain.net/blobbercore/mock/ctx.go new file mode 100644 index 000000000..7573aa48f --- /dev/null +++ b/code/go/0chain.net/blobbercore/mock/ctx.go @@ -0,0 +1,22 @@ +package mock + +import ( + "context" + "net/http" + + "github.com/0chain/blobber/code/go/0chain.net/core/common" + "github.com/0chain/gosdk/constants" +) + +func SetupHandlerContext(ctx context.Context, r *http.Request, allocation string) context.Context { + + ctx = context.WithValue(ctx, constants.ContextKeyClient, + r.Header.Get(common.ClientHeader)) + ctx = context.WithValue(ctx, constants.ContextKeyClientKey, + r.Header.Get(common.ClientKeyHeader)) + ctx = context.WithValue(ctx, constants.ContextKeyAllocation, + allocation) + // signature is not required for all requests, but if header is empty it won`t affect anything + ctx = context.WithValue(ctx, constants.ContextKeyClientSignatureHeaderKey, r.Header.Get(common.ClientSignatureHeader)) + return ctx +} diff --git a/code/go/0chain.net/blobbercore/mock/init.go b/code/go/0chain.net/blobbercore/mock/init.go new file mode 100644 index 000000000..768e21bd9 --- /dev/null +++ b/code/go/0chain.net/blobbercore/mock/init.go @@ -0,0 +1,35 @@ +package mock + +import ( + "io" + "net/http" + "net/http/httptest" + + "github.com/0chain/gosdk/sdks" + "github.com/0chain/gosdk/sdks/blobber" +) + +const ( + zboxWallet = "{\"client_id\":\"9a566aa4f8e8c342fed97c8928040a21f21b8f574e5782c28568635ba9c75a85\",\"client_key\":\"40cd10039913ceabacf05a7c60e1ad69bb2964987bc50f77495e514dc451f907c3d8ebcdab20eedde9c8f39b9a1d66609a637352f318552fb69d4b3672516d1a\",\"keys\":[{\"public_key\":\"40cd10039913ceabacf05a7c60e1ad69bb2964987bc50f77495e514dc451f907c3d8ebcdab20eedde9c8f39b9a1d66609a637352f318552fb69d4b3672516d1a\",\"private_key\":\"a3a88aad5d89cec28c6e37c2925560ce160ac14d2cdcf4a4654b2bb358fe7514\"}],\"mnemonics\":\"inside february piece turkey offer merry select combine tissue wave wet shift room afraid december gown mean brick speak grant gain become toy clown\",\"version\":\"1.0\",\"date_created\":\"2021-05-21 17:32:29.484657 +0545 +0545 m=+0.072791323\"}" +) + +// const ( +// mockOwnerWallet = "{\"client_id\":\"5d0229e0141071c1f88785b1faba4b612582f9d446b02e8d893f1e0d0ce92cdc\",\"client_key\":\"aefef5778906680360cf55bf462823367161520ad95ca183445a879a59c9bf0470b74e41fc12f2ee0ce9c19c4e77878d734226918672d089f561ecf1d5435720\",\"keys\":[{\"public_key\":\"aefef5778906680360cf55bf462823367161520ad95ca183445a879a59c9bf0470b74e41fc12f2ee0ce9c19c4e77878d734226918672d089f561ecf1d5435720\",\"private_key\":\"4f8af6fb1098a3817d705aef96db933f31755674b00a5d38bb2439c0a27b0117\"}],\"mnemonics\":\"erode transfer noble civil ridge cloth sentence gauge board wheel sight caution okay sand ranch ice frozen frown grape lion feed fox game zone\",\"version\":\"1.0\",\"date_created\":\"2021-09-04T14:11:06+01:00\"}" +// ) + +func NewBlobberClient() *blobber.Blobber { + + z := sdks.New("9a566aa4f8e8c342fed97c8928040a21f21b8f574e5782c28568635ba9c75a85", 
"40cd10039913ceabacf05a7c60e1ad69bb2964987bc50f77495e514dc451f907c3d8ebcdab20eedde9c8f39b9a1d66609a637352f318552fb69d4b3672516d1a", "bls0chain") + err := z.InitWallet(zboxWallet) + if err != nil { + panic("mock: z.InitWallet " + err.Error()) + } + z.NewRequest = func(method, url string, body io.Reader) (*http.Request, error) { + return httptest.NewRequest(method, url, body), nil + } + return blobber.New(z, "http://127.0.0.1:5051/") +} + +func InitServer() { + //client.PopulateClient(mockOwnerWallet, "bls0chain") +} diff --git a/code/go/0chain.net/blobbercore/readmarker/protocol.go b/code/go/0chain.net/blobbercore/readmarker/protocol.go index fb2e08f4f..87795e6c1 100644 --- a/code/go/0chain.net/blobbercore/readmarker/protocol.go +++ b/code/go/0chain.net/blobbercore/readmarker/protocol.go @@ -6,7 +6,7 @@ import ( "time" "github.com/0chain/blobber/code/go/0chain.net/blobbercore/allocation" - "github.com/0chain/blobber/code/go/0chain.net/blobbercore/constants" + "github.com/0chain/gosdk/constants" "github.com/0chain/blobber/code/go/0chain.net/blobbercore/datastore" "github.com/0chain/blobber/code/go/0chain.net/core/chain" @@ -35,12 +35,12 @@ func (rm *ReadMarkerEntity) VerifyMarker(ctx context.Context, sa *allocation.All return common.NewError("read_marker_validation_failed", "Read Marker is not for the blobber") } - clientPublicKey := ctx.Value(constants.CLIENT_KEY_CONTEXT_KEY).(string) + clientPublicKey := ctx.Value(constants.ContextKeyClientKey).(string) if len(clientPublicKey) == 0 || clientPublicKey != rm.LatestRM.ClientPublicKey { return common.NewError("read_marker_validation_failed", "Could not get the public key of the client") } - clientID := ctx.Value(constants.CLIENT_CONTEXT_KEY).(string) + clientID := ctx.Value(constants.ContextKeyClient).(string) if len(clientID) == 0 || clientID != rm.LatestRM.ClientID { return common.NewError("read_marker_validation_failed", "Read Marker clientID does not match request clientID") } diff --git a/code/go/0chain.net/blobbercore/readmarker/readmarker.go b/code/go/0chain.net/blobbercore/readmarker/readmarker.go new file mode 100644 index 000000000..665e442a9 --- /dev/null +++ b/code/go/0chain.net/blobbercore/readmarker/readmarker.go @@ -0,0 +1,56 @@ +package readmarker + +import ( + "context" + + "github.com/0chain/blobber/code/go/0chain.net/blobbercore/config" + "github.com/0chain/blobber/code/go/0chain.net/blobbercore/datastore" + "github.com/0chain/blobber/code/go/0chain.net/core/logging" + "github.com/remeh/sizedwaitgroup" + "go.uber.org/zap" +) + +func redeemReadMarker(ctx context.Context) { + defer func() { + iterInprogress = false + if r := recover(); r != nil { + logging.Logger.Error("[recover] redeemReadMarker", zap.Any("err", r)) + } + + }() + + if !iterInprogress { + iterInprogress = true + rctx := datastore.GetStore().CreateTransaction(ctx) + db := datastore.GetStore().GetTransaction(rctx) + readMarkers := make([]*ReadMarkerEntity, 0) + rm := &ReadMarkerEntity{RedeemRequired: true} + db.Where(rm). // redeem_required = true + Where("counter <> suspend"). 
// and not suspended + Order("created_at ASC").Find(&readMarkers) + if len(readMarkers) > 0 { + swg := sizedwaitgroup.New(config.Configuration.RMRedeemNumWorkers) + for _, rmEntity := range readMarkers { + swg.Add() + go func(redeemCtx context.Context, rmEntity *ReadMarkerEntity) { + redeemCtx = datastore.GetStore().CreateTransaction(redeemCtx) + defer redeemCtx.Done() + err := RedeemReadMarker(redeemCtx, rmEntity) + if err != nil { + logging.Logger.Error("Error redeeming the read marker.", zap.Error(err)) + } + db := datastore.GetStore().GetTransaction(redeemCtx) + err = db.Commit().Error + if err != nil { + logging.Logger.Error("Error commiting the readmarker redeem", zap.Error(err)) + } + swg.Done() + }(ctx, rmEntity) + } + swg.Wait() + } + db.Rollback() + rctx.Done() + iterInprogress = false + } +} diff --git a/code/go/0chain.net/blobbercore/readmarker/worker.go b/code/go/0chain.net/blobbercore/readmarker/worker.go index c6a5fb3fc..7f30f838a 100644 --- a/code/go/0chain.net/blobbercore/readmarker/worker.go +++ b/code/go/0chain.net/blobbercore/readmarker/worker.go @@ -6,17 +6,15 @@ import ( "time" "github.com/0chain/blobber/code/go/0chain.net/blobbercore/config" - "github.com/0chain/blobber/code/go/0chain.net/blobbercore/datastore" "github.com/0chain/blobber/code/go/0chain.net/core/chain" . "github.com/0chain/blobber/code/go/0chain.net/core/logging" "github.com/0chain/blobber/code/go/0chain.net/core/transaction" - "github.com/remeh/sizedwaitgroup" "go.uber.org/zap" ) func SetupWorkers(ctx context.Context) { - go RedeemMarkers(ctx) + go startRedeemMarkers(ctx) } func RedeemReadMarker(ctx context.Context, rmEntity *ReadMarkerEntity) ( @@ -79,47 +77,14 @@ func RedeemReadMarker(ctx context.Context, rmEntity *ReadMarkerEntity) ( var iterInprogress = false -func RedeemMarkers(ctx context.Context) { +func startRedeemMarkers(ctx context.Context) { ticker := time.NewTicker(time.Duration(config.Configuration.RMRedeemFreq) * time.Second) for { select { case <-ctx.Done(): return case <-ticker.C: - if !iterInprogress { - iterInprogress = true - rctx := datastore.GetStore().CreateTransaction(ctx) - db := datastore.GetStore().GetTransaction(rctx) - readMarkers := make([]*ReadMarkerEntity, 0) - rm := &ReadMarkerEntity{RedeemRequired: true} - db.Where(rm). // redeem_required = true - Where("counter <> suspend"). 
// and not suspended - Order("created_at ASC").Find(&readMarkers) - if len(readMarkers) > 0 { - swg := sizedwaitgroup.New(config.Configuration.RMRedeemNumWorkers) - for _, rmEntity := range readMarkers { - swg.Add() - go func(redeemCtx context.Context, rmEntity *ReadMarkerEntity) { - redeemCtx = datastore.GetStore().CreateTransaction(redeemCtx) - defer redeemCtx.Done() - err := RedeemReadMarker(redeemCtx, rmEntity) - if err != nil { - Logger.Error("Error redeeming the read marker.", zap.Error(err)) - } - db := datastore.GetStore().GetTransaction(redeemCtx) - err = db.Commit().Error - if err != nil { - Logger.Error("Error commiting the readmarker redeem", zap.Error(err)) - } - swg.Done() - }(ctx, rmEntity) - } - swg.Wait() - } - db.Rollback() - rctx.Done() - iterInprogress = false - } + redeemReadMarker(ctx) } } diff --git a/code/go/0chain.net/blobbercore/reference/objectpath.go b/code/go/0chain.net/blobbercore/reference/objectpath.go index 3712b86e7..84d449131 100644 --- a/code/go/0chain.net/blobbercore/reference/objectpath.go +++ b/code/go/0chain.net/blobbercore/reference/objectpath.go @@ -12,6 +12,8 @@ type ObjectPath struct { Meta map[string]interface{} `json:"meta_data"` Path map[string]interface{} `json:"path"` FileBlockNum int64 `json:"file_block_num"` + ChunkSize int64 `json:"chunk_size"` + Size int64 `json:"size"` RefID int64 `json:"-"` } @@ -83,6 +85,8 @@ func GetObjectPath(ctx context.Context, allocationID string, blockNum int64) (*O retObj.Meta = curRef.GetListingData(ctx) retObj.Path = result retObj.FileBlockNum = remainingBlocks + retObj.ChunkSize = curRef.ChunkSize + retObj.Size = curRef.Size retObj.RefID = curRef.ID return &retObj, nil diff --git a/code/go/0chain.net/blobbercore/reference/ref.go b/code/go/0chain.net/blobbercore/reference/ref.go index f102a5cd5..ce3fa0272 100644 --- a/code/go/0chain.net/blobbercore/reference/ref.go +++ b/code/go/0chain.net/blobbercore/reference/ref.go @@ -90,6 +90,8 @@ type Ref struct { UpdatedAt time.Time `gorm:"column:updated_at" dirlist:"updated_at" filelist:"updated_at"` DeletedAt gorm.DeletedAt `gorm:"column:deleted_at"` // soft deletion + + ChunkSize int64 `gorm:"column:chunk_size" dirlist:"chunk_size" filelist:"chunk_size"` } type PaginatedRef struct { //Gorm smart select fields. @@ -123,12 +125,15 @@ type PaginatedRef struct { //Gorm smart select fields. 
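
The read-marker redeem loop above (and the write-marker one later in this diff) bounds its concurrency with `remeh/sizedwaitgroup`: `Add()` blocks once the configured number of goroutines are in flight. A standalone sketch of that fan-out pattern, with a stand-in work function:

```go
package main

import (
	"fmt"
	"time"

	"github.com/remeh/sizedwaitgroup"
)

// redeem stands in for RedeemReadMarker / RedeemMarkersForAllocation:
// a unit of work that should run with bounded parallelism.
func redeem(id int) error {
	time.Sleep(100 * time.Millisecond)
	fmt.Println("redeemed", id)
	return nil
}

func main() {
	const numWorkers = 5 // e.g. config.Configuration.RMRedeemNumWorkers

	swg := sizedwaitgroup.New(numWorkers)
	for id := 0; id < 20; id++ {
		swg.Add() // blocks while numWorkers goroutines are already running
		go func(id int) {
			defer swg.Done()
			if err := redeem(id); err != nil {
				fmt.Println("redeem failed:", err)
			}
		}(id)
	}
	swg.Wait() // drain remaining goroutines before committing/rolling back
}
```
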
CreatedAt time.Time `gorm:"column:created_at" json:"created_at,omitempty"` UpdatedAt time.Time `gorm:"column:updated_at" json:"updated_at,omitempty"` DeletedAt gorm.DeletedAt `gorm:"column:deleted_at" json:"-"` // soft deletion + + ChunkSize int64 `gorm:"column:chunk_size" dirlist:"chunk_size" filelist:"chunk_size"` } func (Ref) TableName() string { return "reference_objects" } +// GetReferenceLookup hash(allocationID + ":" + path) func GetReferenceLookup(allocationID string, path string) string { return encryption.Hash(allocationID + ":" + path) } @@ -166,6 +171,7 @@ func (r *Ref) SetAttributes(attr *Attributes) (err error) { return } +// GetReference get FileRef with allcationID and path from postgres func GetReference(ctx context.Context, allocationID string, path string) (*Ref, error) { ref := &Ref{} db := datastore.GetStore().GetTransaction(ctx) @@ -264,6 +270,7 @@ func (fr *Ref) GetFileHashData() string { hashArray = append(hashArray, strconv.FormatInt(fr.ActualFileSize, 10)) hashArray = append(hashArray, fr.ActualFileHash) hashArray = append(hashArray, string(fr.Attributes)) + hashArray = append(hashArray, strconv.FormatInt(fr.ChunkSize, 10)) return strings.Join(hashArray, ":") } @@ -272,7 +279,7 @@ func (fr *Ref) CalculateFileHash(ctx context.Context, saveToDB bool) (string, er // fmt.Println("Fileref hash data: " + fr.GetFileHashData()) fr.Hash = encryption.Hash(fr.GetFileHashData()) // fmt.Println("Fileref hash : " + fr.Hash) - fr.NumBlocks = int64(math.Ceil(float64(fr.Size*1.0) / CHUNK_SIZE)) + fr.NumBlocks = int64(math.Ceil(float64(fr.Size*1.0) / float64(fr.ChunkSize))) fr.PathHash = GetReferenceLookup(fr.AllocationID, fr.Path) fr.PathLevel = len(GetSubDirsFromPath(fr.Path)) + 1 //strings.Count(fr.Path, "/") fr.LookupHash = GetReferenceLookup(fr.AllocationID, fr.Path) @@ -284,6 +291,7 @@ func (fr *Ref) CalculateFileHash(ctx context.Context, saveToDB bool) (string, er } func (r *Ref) CalculateDirHash(ctx context.Context, saveToDB bool) (string, error) { + // empty directory, return hash directly if len(r.Children) == 0 && !r.childrenLoaded { return r.Hash, nil } @@ -374,6 +382,7 @@ func (r *Ref) Save(ctx context.Context) error { return db.Save(r).Error } +// GetListingData reflect and convert all fields into map[string]interface{} func (r *Ref) GetListingData(ctx context.Context) map[string]interface{} { if r == nil { return make(map[string]interface{}) diff --git a/code/go/0chain.net/blobbercore/reference/referencepath.go b/code/go/0chain.net/blobbercore/reference/referencepath.go index 6da91b3d8..a0a81f209 100644 --- a/code/go/0chain.net/blobbercore/reference/referencepath.go +++ b/code/go/0chain.net/blobbercore/reference/referencepath.go @@ -21,6 +21,7 @@ func GetReferencePath(ctx context.Context, allocationID string, path string) (*R return GetReferencePathFromPaths(ctx, allocationID, []string{path}) } +// GetReferencePathFromPaths validate and build full dir tree from db, and CalculateHash and return root Ref func GetReferencePathFromPaths(ctx context.Context, allocationID string, paths []string) (*Ref, error) { var refs []Ref db := datastore.GetStore().GetTransaction(ctx) @@ -41,11 +42,13 @@ func GetReferencePathFromPaths(ctx context.Context, allocationID string, paths [ } } + // root reference_objects with parent_path="" db = db.Or("parent_path = ? 
AND allocation_id = ?", "", allocationID) err := db.Order("level, lookup_hash").Find(&refs).Error if err != nil { return nil, err } + // there is no any child reference_objects for affected path, and instert root reference_objects if len(refs) == 0 { return &Ref{Type: DIRECTORY, AllocationID: allocationID, Name: "/", Path: "/", ParentPath: "", PathLevel: 1}, nil } @@ -55,9 +58,11 @@ func GetReferencePathFromPaths(ctx context.Context, allocationID string, paths [ return nil, common.NewError("invalid_dir_tree", "DB has invalid tree. Root not found in DB") } + // valdiate dir tree, and populate Ref's children for CalculateHash refMap := make(map[string]*Ref) refMap[rootRef.Path] = rootRef for i := 1; i < len(refs); i++ { + if _, ok := refMap[refs[i].ParentPath]; !ok { return nil, common.NewError("invalid_dir_tree", "DB has invalid tree.") } diff --git a/code/go/0chain.net/blobbercore/stats/handler.go b/code/go/0chain.net/blobbercore/stats/handler.go index bf1a1be0a..04614a126 100644 --- a/code/go/0chain.net/blobbercore/stats/handler.go +++ b/code/go/0chain.net/blobbercore/stats/handler.go @@ -6,10 +6,10 @@ import ( "html/template" "net/http" - "github.com/0chain/blobber/code/go/0chain.net/blobbercore/constants" "github.com/0chain/blobber/code/go/0chain.net/blobbercore/datastore" "github.com/0chain/blobber/code/go/0chain.net/core/common" . "github.com/0chain/blobber/code/go/0chain.net/core/logging" + "github.com/0chain/gosdk/constants" "go.uber.org/zap" ) @@ -283,11 +283,11 @@ func StatsJSONHandler(ctx context.Context, r *http.Request) (interface{}, error) func GetStatsHandler(ctx context.Context, r *http.Request) (interface{}, error) { q := r.URL.Query() - ctx = context.WithValue(ctx, constants.ALLOCATION_CONTEXT_KEY, q.Get("allocation_id")) + ctx = context.WithValue(ctx, constants.ContextKeyAllocation, q.Get("allocation_id")) ctx = datastore.GetStore().CreateTransaction(ctx) db := datastore.GetStore().GetTransaction(ctx) defer db.Rollback() - allocationID := ctx.Value(constants.ALLOCATION_CONTEXT_KEY).(string) + allocationID := ctx.Value(constants.ContextKeyAllocation).(string) bs := &BlobberStats{} if len(allocationID) != 0 { // TODO: Get only the allocation info from DB diff --git a/code/go/0chain.net/blobbercore/writemarker/entity.go b/code/go/0chain.net/blobbercore/writemarker/entity.go index 27b7c5e4f..52e4b10bb 100644 --- a/code/go/0chain.net/blobbercore/writemarker/entity.go +++ b/code/go/0chain.net/blobbercore/writemarker/entity.go @@ -57,7 +57,7 @@ func (wm *WriteMarkerEntity) UpdateStatus(ctx context.Context, db := datastore.GetStore().GetTransaction(ctx) statusBytes, _ := json.Marshal(statusMessage) - fmt.Println(string(statusBytes)) + if status == Failed { wm.ReedeemRetries++ err = db.Model(wm).Updates(WriteMarkerEntity{ @@ -98,6 +98,7 @@ func (wm *WriteMarkerEntity) UpdateStatus(ctx context.Context, return } +// GetWriteMarkerEntity get WriteMarkerEntity from postgres func GetWriteMarkerEntity(ctx context.Context, allocation_root string) (*WriteMarkerEntity, error) { db := datastore.GetStore().GetTransaction(ctx) wm := &WriteMarkerEntity{} diff --git a/code/go/0chain.net/blobbercore/writemarker/protocol.go b/code/go/0chain.net/blobbercore/writemarker/protocol.go index d4e3a7e56..cdad31027 100644 --- a/code/go/0chain.net/blobbercore/writemarker/protocol.go +++ b/code/go/0chain.net/blobbercore/writemarker/protocol.go @@ -3,16 +3,17 @@ package writemarker import ( "context" "encoding/json" + "fmt" "time" "github.com/0chain/blobber/code/go/0chain.net/blobbercore/allocation" - 
"github.com/0chain/blobber/code/go/0chain.net/blobbercore/constants" "github.com/0chain/blobber/code/go/0chain.net/core/chain" "github.com/0chain/blobber/code/go/0chain.net/core/common" "github.com/0chain/blobber/code/go/0chain.net/core/encryption" . "github.com/0chain/blobber/code/go/0chain.net/core/logging" "github.com/0chain/blobber/code/go/0chain.net/core/node" "github.com/0chain/blobber/code/go/0chain.net/core/transaction" + "github.com/0chain/gosdk/constants" "go.uber.org/zap" ) @@ -45,15 +46,15 @@ func (wm *WriteMarkerEntity) VerifyMarker(ctx context.Context, sa *allocation.Al } if wm.WM.Size != co.Size { - return common.NewError("write_marker_validation_failed", "Write Marker size does not match the connection size") + return common.NewError("write_marker_validation_failed", fmt.Sprintf("Write Marker size %v does not match the connection size %v", wm.WM.Size, co.Size)) } - clientPublicKey := ctx.Value(constants.CLIENT_KEY_CONTEXT_KEY).(string) + clientPublicKey := ctx.Value(constants.ContextKeyClientKey).(string) if len(clientPublicKey) == 0 { return common.NewError("write_marker_validation_failed", "Could not get the public key of the client") } - clientID := ctx.Value(constants.CLIENT_CONTEXT_KEY).(string) + clientID := ctx.Value(constants.ContextKeyClient).(string) if len(clientID) == 0 || clientID != wm.WM.ClientID || clientID != co.ClientID || co.ClientID != wm.WM.ClientID { return common.NewError("write_marker_validation_failed", "Write Marker is not by the same client who uploaded") } diff --git a/code/go/0chain.net/blobbercore/writemarker/worker.go b/code/go/0chain.net/blobbercore/writemarker/worker.go index 84a112a13..09fb734f4 100644 --- a/code/go/0chain.net/blobbercore/writemarker/worker.go +++ b/code/go/0chain.net/blobbercore/writemarker/worker.go @@ -8,13 +8,12 @@ import ( "github.com/0chain/blobber/code/go/0chain.net/blobbercore/config" "github.com/0chain/blobber/code/go/0chain.net/blobbercore/datastore" . 
"github.com/0chain/blobber/code/go/0chain.net/core/logging" - "github.com/remeh/sizedwaitgroup" "go.uber.org/zap" ) func SetupWorkers(ctx context.Context) { - go RedeemWriteMarkers(ctx) + go startRedeemWriteMarkers(ctx) } func RedeemMarkersForAllocation(ctx context.Context, allocationObj *allocation.Allocation) error { @@ -66,7 +65,7 @@ func RedeemMarkersForAllocation(ctx context.Context, allocationObj *allocation.A return nil } -func RedeemWriteMarkers(ctx context.Context) { +func startRedeemWriteMarkers(ctx context.Context) { var ticker = time.NewTicker( time.Duration(config.Configuration.WMRedeemFreq) * time.Second, ) @@ -77,27 +76,7 @@ func RedeemWriteMarkers(ctx context.Context) { case <-ticker.C: // Logger.Info("Trying to redeem writemarkers.", // zap.Any("numOfWorkers", numOfWorkers)) - rctx := datastore.GetStore().CreateTransaction(ctx) - db := datastore.GetStore().GetTransaction(rctx) - allocations := make([]*allocation.Allocation, 0) - alloc := &allocation.Allocation{IsRedeemRequired: true} - db.Where(alloc).Find(&allocations) - if len(allocations) > 0 { - swg := sizedwaitgroup.New(config.Configuration.WMRedeemNumWorkers) - for _, allocationObj := range allocations { - swg.Add() - go func(redeemCtx context.Context, allocationObj *allocation.Allocation) { - err := RedeemMarkersForAllocation(redeemCtx, allocationObj) - if err != nil { - Logger.Error("Error redeeming the write marker for allocation.", zap.Any("allocation", allocationObj.ID), zap.Error(err)) - } - swg.Done() - }(ctx, allocationObj) - } - swg.Wait() - } - db.Rollback() - rctx.Done() + redeemWriteMarker(ctx) } } diff --git a/code/go/0chain.net/blobbercore/writemarker/writemarker.go b/code/go/0chain.net/blobbercore/writemarker/writemarker.go new file mode 100644 index 000000000..99123d1e2 --- /dev/null +++ b/code/go/0chain.net/blobbercore/writemarker/writemarker.go @@ -0,0 +1,42 @@ +package writemarker + +import ( + "context" + + "github.com/0chain/blobber/code/go/0chain.net/blobbercore/allocation" + "github.com/0chain/blobber/code/go/0chain.net/blobbercore/config" + "github.com/0chain/blobber/code/go/0chain.net/blobbercore/datastore" + "github.com/0chain/blobber/code/go/0chain.net/core/logging" + "github.com/remeh/sizedwaitgroup" + "go.uber.org/zap" +) + +func redeemWriteMarker(ctx context.Context) { + defer func() { + if r := recover(); r != nil { + logging.Logger.Error("[recover] redeemWriteMarker", zap.Any("err", r)) + } + }() + + rctx := datastore.GetStore().CreateTransaction(ctx) + db := datastore.GetStore().GetTransaction(rctx) + allocations := make([]*allocation.Allocation, 0) + alloc := &allocation.Allocation{IsRedeemRequired: true} + db.Where(alloc).Find(&allocations) + if len(allocations) > 0 { + swg := sizedwaitgroup.New(config.Configuration.WMRedeemNumWorkers) + for _, allocationObj := range allocations { + swg.Add() + go func(redeemCtx context.Context, allocationObj *allocation.Allocation) { + err := RedeemMarkersForAllocation(redeemCtx, allocationObj) + if err != nil { + logging.Logger.Error("Error redeeming the write marker for allocation.", zap.Any("allocation", allocationObj.ID), zap.Error(err)) + } + swg.Done() + }(ctx, allocationObj) + } + swg.Wait() + } + db.Rollback() + rctx.Done() +} diff --git a/code/go/0chain.net/core/common/types.go b/code/go/0chain.net/core/common/types.go index a5c79cda5..73448f77e 100644 --- a/code/go/0chain.net/core/common/types.go +++ b/code/go/0chain.net/core/common/types.go @@ -4,9 +4,6 @@ import ( "fmt" ) -/*ContextKey - type for key used to store values into context */ 
-type ContextKey string - // WhoPays for file downloading. type WhoPays int diff --git a/code/go/0chain.net/core/node/context.go b/code/go/0chain.net/core/node/context.go index 120c160cd..083851263 100644 --- a/code/go/0chain.net/core/node/context.go +++ b/code/go/0chain.net/core/node/context.go @@ -3,10 +3,10 @@ package node import ( "context" - "github.com/0chain/blobber/code/go/0chain.net/core/common" + "github.com/0chain/gosdk/constants" ) -const SELF_NODE common.ContextKey = "SELF_NODE" +const SELF_NODE constants.ContextKey = "SELF_NODE" /*GetNodeContext - setup a context with the self node */ func GetNodeContext() context.Context { diff --git a/code/go/0chain.net/core/util/merkle_tree.go b/code/go/0chain.net/core/util/merkle_tree.go deleted file mode 100644 index 675b05df2..000000000 --- a/code/go/0chain.net/core/util/merkle_tree.go +++ /dev/null @@ -1,148 +0,0 @@ -package util - -import ( - "fmt" -) - -/*MerkleTree - A data structure that implements MerkleTreeI interface */ -type MerkleTree struct { - tree []string - leavesCount int - levels int -} - -func VerifyMerklePath(hash string, path *MTPath, root string) bool { - mthash := hash - pathNodes := path.Nodes - pl := len(pathNodes) - idx := path.LeafIndex - for i := 0; i < pl; i++ { - if idx&1 == 1 { - mthash = MHash(pathNodes[i], mthash) - } else { - mthash = MHash(mthash, pathNodes[i]) - } - idx = (idx - idx&1) / 2 - } - return mthash == root -} - -func (mt *MerkleTree) computeSize(leaves int) (int, int) { - if leaves == 1 { - return 2, 2 - } - var tsize int - var levels int - for ll := leaves; ll > 1; ll = (ll + 1) / 2 { - tsize += ll - levels++ - } - tsize++ - levels++ - return tsize, levels -} - -/*ComputeTree - given the leaf nodes, compute the merkle tree */ -func (mt *MerkleTree) ComputeTree(hashes []Hashable) { - var tsize int - tsize, mt.levels = mt.computeSize(len(hashes)) - mt.leavesCount = len(hashes) - mt.tree = make([]string, tsize) - for idx, hashable := range hashes { - mt.tree[idx] = hashable.GetHash() - } - if len(hashes) == 1 { - mt.tree[1] = MHash(mt.tree[0], mt.tree[0]) - return - } - for pl0, plsize := 0, mt.leavesCount; plsize > 1; pl0, plsize = pl0+plsize, (plsize+1)/2 { - l0 := pl0 + plsize - for i, j := 0, 0; i < plsize; i, j = i+2, j+1 { - mt.tree[pl0+plsize+j] = MHash(mt.tree[pl0+i], mt.tree[pl0+i+1]) - } - if plsize&1 == 1 { - mt.tree[l0+plsize/2] = MHash(mt.tree[pl0+plsize-1], mt.tree[pl0+plsize-1]) - } - } -} - -/*GetRoot - get the root of the merkle tree */ -func (mt *MerkleTree) GetRoot() string { - return mt.tree[len(mt.tree)-1] -} - -/*GetTree - get the entire merkle tree */ -func (mt *MerkleTree) GetTree() []string { - return mt.tree -} - -/*SetTree - set the entire merkle tree */ -func (mt *MerkleTree) SetTree(leavesCount int, tree []string) error { - size, levels := mt.computeSize(leavesCount) - if size != len(tree) { - return fmt.Errorf("Merkle tree with leaves %v should have size %v but only %v is given", leavesCount, size, len(tree)) - } - mt.levels = levels - mt.tree = tree - mt.leavesCount = leavesCount - return nil -} - -/*GetLeafIndex - Get the index of the leaf node in the tree */ -func (mt *MerkleTree) GetLeafIndex(hash Hashable) int { - hs := hash.GetHash() - for i := 0; i < mt.leavesCount; i++ { - if mt.tree[i] == hs { - return i - } - } - return -1 -} - -/*GetPath - get the path that can be used to verify the merkle tree */ -func (mt *MerkleTree) GetPath(hash Hashable) *MTPath { - hidx := mt.GetLeafIndex(hash) - if hidx < 0 { - return &MTPath{} - } - return mt.GetPathByIndex(hidx) 
-} - -/*VerifyPath - given a leaf node and the path, verify that the node is part of the tree */ -func (mt *MerkleTree) VerifyPath(hash Hashable, path *MTPath) bool { - hs := hash.GetHash() - return VerifyMerklePath(hs, path, mt.GetRoot()) -} - -/*GetPathByIndex - get the path of a leaf node at index i */ -func (mt *MerkleTree) GetPathByIndex(idx int) *MTPath { - path := make([]string, mt.levels-1, mt.levels-1) //nolint:gosimple // need more time to verify: declaring capacity is probably necessary? - mpath := &MTPath{LeafIndex: idx} - if idx&1 == 1 { - path[0] = mt.tree[idx-1] - } else { - if idx+1 < mt.leavesCount { - path[0] = mt.tree[idx+1] - } else { - path[0] = mt.tree[idx] - } - } - for pl0, plsize, pi := 0, mt.leavesCount, 1; plsize > 2; pl0, plsize, pi = pl0+plsize, (plsize+1)/2, pi+1 { - l0 := pl0 + plsize - idx = (idx - idx&1) / 2 - if idx&1 == 1 { - //path = append(path, mt.tree[l0+idx-1]) - path[pi] = mt.tree[l0+idx-1] - } else { - if l0+idx+1 < l0+(plsize+1)/2 { - //path = append(path, mt.tree[l0+idx+1]) - path[pi] = mt.tree[l0+idx+1] - } else { - //path = append(path, mt.tree[l0+idx]) - path[pi] = mt.tree[l0+idx] - } - } - } - mpath.Nodes = path - return mpath -} diff --git a/code/go/0chain.net/core/util/merkle_tree_interface.go b/code/go/0chain.net/core/util/merkle_tree_interface.go deleted file mode 100644 index 4eeea3ede..000000000 --- a/code/go/0chain.net/core/util/merkle_tree_interface.go +++ /dev/null @@ -1,53 +0,0 @@ -package util - -import "github.com/0chain/blobber/code/go/0chain.net/core/encryption" - -/*MerkleTreeI - a merkle tree interface required for constructing and providing verification */ -type MerkleTreeI interface { - //API to create a tree from leaf nodes - ComputeTree(hashes []Hashable) - GetRoot() string - GetTree() []string - - //API to load an existing tree - SetTree(leavesCount int, tree []string) error - - // API for verification when the leaf node is known - GetPath(hash Hashable) *MTPath // Server needs to provide this - VerifyPath(hash Hashable, path *MTPath) bool //This is only required by a client but useful for testing - - /* API for random verification when the leaf node is uknown - (verification of the data to hash used as leaf node is outside this API) */ - GetPathByIndex(idx int) *MTPath -} - -/*MTPath - The merkle tree path*/ -type MTPath struct { - Nodes []string `json:"nodes"` - LeafIndex int `json:"leaf_index"` -} - -/*Hash - the hashing used for the merkle tree construction */ -func Hash(text string) string { - return encryption.Hash(text) -} - -/*MHash - merkle hashing of a pair of child hashes */ -func MHash(h1 string, h2 string) string { - return Hash(h1 + h2) -} - -type StringHashable struct { - Hash string -} - -func NewStringHashable(hash string) *StringHashable { - return &StringHashable{Hash: hash} -} - -func (sh *StringHashable) GetHash() string { - return sh.Hash -} -func (sh *StringHashable) GetHashBytes() []byte { - return []byte(sh.Hash) -} diff --git a/code/go/0chain.net/core/util/merkle_tree_test.go b/code/go/0chain.net/core/util/merkle_tree_test.go deleted file mode 100644 index 57fe65380..000000000 --- a/code/go/0chain.net/core/util/merkle_tree_test.go +++ /dev/null @@ -1,120 +0,0 @@ -package util - -import ( - "fmt" - "math/rand" - "testing" - - "github.com/0chain/blobber/code/go/0chain.net/core/encryption" -) - -type Txn struct { - data string -} - -func (t *Txn) GetHash() string { - return t.data -} - -func (t *Txn) GetHashBytes() []byte { - return encryption.RawHash(t.data) -} - -func (t *Txn) Encode() []byte { - 
return []byte(t.data) -} - -func (t *Txn) Decode(data []byte) error { - t.data = string(data) - return nil -} - -func TestMerkleTreeComputeTree(t *testing.T) { - txns := make([]Hashable, 100) - for i := 0; i < len(txns); i++ { - txns[i] = &Txn{data: fmt.Sprintf("%v", 1001-i)} - } - var mt MerkleTreeI = &MerkleTree{} - mt.ComputeTree(txns) - tree := mt.GetTree() - if len(tree) != 202 { - fmt.Printf("%v: %v\n", len(tree), tree) - } -} - -func TestMerkleTreeGetNVerifyPath(t *testing.T) { - txns := make([]Hashable, 101) - for i := 0; i < len(txns); i++ { - txns[i] = &Txn{data: fmt.Sprintf("1000%v", i)} - } - var mt MerkleTreeI = &MerkleTree{} - mt.ComputeTree(txns) - for i := 0; i < len(txns); i++ { - path := mt.GetPath(txns[i]) - if !mt.VerifyPath(txns[i], path) { - fmt.Printf("path: %v %v\n", txns[i], path) - } - } -} - -func TestMerkleTreeSetTree(t *testing.T) { - txns := make([]Hashable, 100) - for i := 0; i < len(txns); i++ { - txns[i] = &Txn{data: fmt.Sprintf("%v", 1001-i)} - } - var mt MerkleTreeI = &MerkleTree{} - mt.ComputeTree(txns) - var mt2 MerkleTreeI = &MerkleTree{} - err := mt2.SetTree(len(txns), mt.GetTree()) - if err != nil { - t.Error(err) - } - if mt.GetRoot() != mt2.GetRoot() { - t.Errorf("Merkle roots didn't match") - } -} - -func BenchmarkMerkleTreeComputeTree(b *testing.B) { - txns := make([]Hashable, 10000) - for i := 0; i < len(txns); i++ { - txns[i] = &Txn{data: fmt.Sprintf("%v", len(txns)-i)} - } - for i := 0; i < b.N; i++ { - var mt MerkleTreeI = &MerkleTree{} - mt.ComputeTree(txns) - } -} - -func BenchmarkMerkleTreeGetPath(b *testing.B) { - txns := make([]Hashable, 10000) - for i := 0; i < len(txns); i++ { - txns[i] = &Txn{data: fmt.Sprintf("%v", len(txns)-i)} - } - var mt MerkleTreeI = &MerkleTree{} - mt.ComputeTree(txns) - for i := 0; i < b.N; i++ { - j := rand.Intn(len(txns)) - mt.GetPath(txns[j]) - } -} - -func BenchmarkMerkleTreeVerifyPath(b *testing.B) { - txns := make([]Hashable, 10000) - for i := 0; i < len(txns); i++ { - txns[i] = &Txn{data: fmt.Sprintf("%v", len(txns)-i)} - } - var mt MerkleTreeI = &MerkleTree{} - mt.ComputeTree(txns) - paths := make([]*MTPath, len(txns)) - for j := 0; j < len(txns); j++ { - paths[j] = mt.GetPath(txns[j]) - } - for i := 0; i < b.N; i++ { - j := rand.Intn(len(txns)) - - if !mt.VerifyPath(txns[j], paths[j]) { - fmt.Printf("path verification failed") - return - } - } -} diff --git a/code/go/0chain.net/dev/miner/init.go b/code/go/0chain.net/dev/miner/init.go new file mode 100644 index 000000000..5bd64dbb8 --- /dev/null +++ b/code/go/0chain.net/dev/miner/init.go @@ -0,0 +1,11 @@ +package miner + +import ( + "github.com/gorilla/mux" +) + +func RegisterHandlers(s *mux.Router) { + // s.HandleFunc("/v1/file/upload/{allocation}", uploadAndUpdateFile).Methods(http.MethodPut, http.MethodPost) + // s.HandleFunc("/v1/file/referencepath/{allocation}", getReference).Methods(http.MethodGet) + // s.HandleFunc("/v1/connection/commit/{allocation}", commitWrite).Methods(http.MethodPost) +} diff --git a/code/go/0chain.net/dev/server.go b/code/go/0chain.net/dev/server.go new file mode 100644 index 000000000..0c213562a --- /dev/null +++ b/code/go/0chain.net/dev/server.go @@ -0,0 +1,45 @@ +// package dev providers tools for local development +package dev + +import ( + "net/http/httptest" + + "github.com/0chain/blobber/code/go/0chain.net/dev/miner" + "github.com/0chain/blobber/code/go/0chain.net/dev/sharder" + "github.com/gorilla/mux" +) + +// Server a local dev server to mock server APIs +type Server struct { + *httptest.Server + *mux.Router +} + 
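
The new dev package (its constructors follow just below) wraps an `httptest.Server` and a gorilla/mux router so miner and sharder APIs can be faked during local development. Since the registered routes are still commented out in this diff, the handler in the following usage sketch is purely an assumption:

```go
package main

import (
	"fmt"
	"io"
	"net/http"

	"github.com/0chain/blobber/code/go/0chain.net/dev"
)

func main() {
	// Spin up the local mock sharder; Server embeds *httptest.Server,
	// so URL and Close are promoted onto it.
	s := dev.NewSharderServer()
	defer s.Close()

	// No routes are registered yet, so add one ad hoc for the demo
	// (this endpoint is an assumption, not part of the diff).
	s.Router.HandleFunc("/v1/block/get", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprint(w, `{"block":{}}`)
	})

	resp, err := http.Get(s.URL + "/v1/block/get")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(string(body))
}
```
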
+// NewServer create a local dev server +func NewServer() *Server { + router := mux.NewRouter() + s := &Server{ + Router: router, + Server: httptest.NewServer(router), + } + + return s +} + +// NewSharderServer create a local dev sharder server +func NewSharderServer() *Server { + s := NewServer() + + sharder.RegisterHandlers(s.Router) + + return s +} + +// NewMinerServer create a local dev miner server +func NewMinerServer() *Server { + s := NewServer() + + miner.RegisterHandlers(s.Router) + + return s +} diff --git a/code/go/0chain.net/dev/sharder/init.go b/code/go/0chain.net/dev/sharder/init.go new file mode 100644 index 000000000..f6992d3cc --- /dev/null +++ b/code/go/0chain.net/dev/sharder/init.go @@ -0,0 +1,11 @@ +package sharder + +import ( + "github.com/gorilla/mux" +) + +func RegisterHandlers(s *mux.Router) { + // s.HandleFunc("/v1/file/upload/{allocation}", uploadAndUpdateFile).Methods(http.MethodPut, http.MethodPost) + // s.HandleFunc("/v1/file/referencepath/{allocation}", getReference).Methods(http.MethodGet) + // s.HandleFunc("/v1/connection/commit/{allocation}", commitWrite).Methods(http.MethodPost) +} diff --git a/code/go/0chain.net/validator/main.go b/code/go/0chain.net/validator/main.go index 4d673d13e..7a1b3e0c0 100644 --- a/code/go/0chain.net/validator/main.go +++ b/code/go/0chain.net/validator/main.go @@ -44,11 +44,12 @@ func main() { logDir := flag.String("log_dir", "", "log_dir") portString := flag.String("port", "", "port") hostname := flag.String("hostname", "", "hostname") + configDir := flag.String("config_dir", "./config", "config_dir") flag.Parse() config.SetupDefaultConfig() - config.SetupConfig() + config.SetupConfig(*configDir) config.Configuration.DeploymentMode = byte(*deploymentMode) diff --git a/code/go/0chain.net/validatorcore/config/config.go b/code/go/0chain.net/validatorcore/config/config.go index f6ae4ce8f..4b59c4bd7 100644 --- a/code/go/0chain.net/validatorcore/config/config.go +++ b/code/go/0chain.net/validatorcore/config/config.go @@ -17,9 +17,14 @@ func SetupDefaultConfig() { } /*SetupConfig - setup the configuration system */ -func SetupConfig() { +func SetupConfig(configDir string) { viper.SetConfigName("0chain_validator") - viper.AddConfigPath("./config") + if configDir == "" { + viper.AddConfigPath("./config") + } else { + viper.AddConfigPath(configDir) + } + err := viper.ReadInConfig() // Find and read the config file if err != nil { // Handle errors reading the config file panic(fmt.Errorf("fatal error config file: %s", err)) diff --git a/code/go/0chain.net/validatorcore/storage/challenge_handler.go b/code/go/0chain.net/validatorcore/storage/challenge_handler.go index ea3a78d01..28add0fd9 100644 --- a/code/go/0chain.net/validatorcore/storage/challenge_handler.go +++ b/code/go/0chain.net/validatorcore/storage/challenge_handler.go @@ -87,6 +87,7 @@ func ChallengeHandler(ctx context.Context, r *http.Request) (interface{}, error) validationTicket.ValidatorID = node.Self.ID validationTicket.ValidatorKey = node.Self.PublicKey validationTicket.Timestamp = common.Now() + if err := validationTicket.Sign(); err != nil { return nil, common.NewError("invalid_parameters", err.Error()) } diff --git a/code/go/0chain.net/validatorcore/storage/context.go b/code/go/0chain.net/validatorcore/storage/context.go index 5cfa922c8..86a2acb5d 100644 --- a/code/go/0chain.net/validatorcore/storage/context.go +++ b/code/go/0chain.net/validatorcore/storage/context.go @@ -5,12 +5,13 @@ import ( "net/http" "github.com/0chain/blobber/code/go/0chain.net/core/common" + 
"github.com/0chain/gosdk/constants" ) func SetupContext(handler common.JSONResponderF) common.JSONResponderF { return func(ctx context.Context, r *http.Request) (interface{}, error) { - ctx = context.WithValue(ctx, CLIENT_CONTEXT_KEY, r.Header.Get(common.ClientHeader)) - ctx = context.WithValue(ctx, CLIENT_KEY_CONTEXT_KEY, + ctx = context.WithValue(ctx, constants.ContextKeyClient, r.Header.Get(common.ClientHeader)) + ctx = context.WithValue(ctx, constants.ContextKeyClientKey, r.Header.Get(common.ClientKeyHeader)) res, err := handler(ctx, r) return res, err diff --git a/code/go/0chain.net/validatorcore/storage/models.go b/code/go/0chain.net/validatorcore/storage/models.go index b64296932..a5f93d0fb 100644 --- a/code/go/0chain.net/validatorcore/storage/models.go +++ b/code/go/0chain.net/validatorcore/storage/models.go @@ -1,6 +1,7 @@ package storage import ( + "bytes" "encoding/json" "fmt" "math/rand" @@ -11,8 +12,8 @@ import ( "github.com/0chain/blobber/code/go/0chain.net/core/common" "github.com/0chain/blobber/code/go/0chain.net/core/encryption" "github.com/0chain/blobber/code/go/0chain.net/core/node" - "github.com/0chain/blobber/code/go/0chain.net/core/util" "github.com/0chain/blobber/code/go/0chain.net/validatorcore/storage/writemarker" + "github.com/0chain/gosdk/core/util" "github.com/mitchellh/mapstructure" @@ -91,6 +92,7 @@ type FileMetaData struct { MerkleRoot string `json:"merkle_root" mapstructure:"merkle_root"` ActualFileSize int64 `json:"actual_file_size" mapstructure:"actual_file_size"` ActualFileHash string `json:"actual_file_hash" mapstructure:"actual_file_hash"` + ChunkSize int64 `json:"chunk_size" mapstructure:"chunk_size"` Attributes Attributes `json:"attributes" mapstructure:"attributes" ` } @@ -106,6 +108,7 @@ func (fr *FileMetaData) GetHashData() string { hashArray = append(hashArray, strconv.FormatInt(fr.ActualFileSize, 10)) hashArray = append(hashArray, fr.ActualFileHash) hashArray = append(hashArray, fr.Attributes.String()) + hashArray = append(hashArray, strconv.FormatInt(fr.ChunkSize, 10)) return strings.Join(hashArray, ":") } @@ -286,6 +289,7 @@ type ChallengeRequest struct { WriteMarkers []*writemarker.WriteMarkerEntity `json:"write_markers,omitempty"` DataBlock []byte `json:"data,omitempty"` MerklePath *util.MTPath `json:"merkle_path,omitempty"` + ChunkSize int64 `json:"chunk_size,omitempty"` } func (cr *ChallengeRequest) VerifyChallenge(challengeObj *Challenge, allocationObj *Allocation) error { @@ -326,8 +330,15 @@ func (cr *ChallengeRequest) VerifyChallenge(challengeObj *Challenge, allocationO } Logger.Info("Verifying data block and merkle path", zap.Any("challenge_id", challengeObj.ID)) - contentHash := encryption.Hash(cr.DataBlock) - merkleVerify := util.VerifyMerklePath(contentHash, cr.MerklePath, cr.ObjPath.Meta.MerkleRoot) + //contentHash := encryption.Hash(cr.DataBlock) + + contentHasher := util.NewCompactMerkleTree(nil) + err = contentHasher.Reload(cr.ChunkSize, bytes.NewReader(cr.DataBlock)) + if err != nil { + return common.NewError("challenge_validation_failed", "Failed to calculate content hash for the data block") + } + + merkleVerify := util.VerifyMerklePath(contentHasher.GetMerkleRoot(), cr.MerklePath, cr.ObjPath.Meta.MerkleRoot) if !merkleVerify { return common.NewError("challenge_validation_failed", "Failed to verify the merkle path for the data block") } diff --git a/code/go/0chain.net/validatorcore/storage/models_test.go b/code/go/0chain.net/validatorcore/storage/models_test.go index c684c0d9a..8b56b577e 100644 --- 
a/code/go/0chain.net/validatorcore/storage/models_test.go +++ b/code/go/0chain.net/validatorcore/storage/models_test.go @@ -141,7 +141,7 @@ func TestFileMetaData_GetHashData(t *testing.T) { WhoPaysForReads: common.WhoPaysOwner, }, }, - want: "::::0:::0::{}", + want: "::::0:::0::{}:0", // want: "::::0:::0::{\"who_pays_for_reads\":0}", }, { @@ -152,14 +152,14 @@ func TestFileMetaData_GetHashData(t *testing.T) { WhoPaysForReads: common.WhoPays3rdParty, }, }, - want: "::::0:::0::{\"who_pays_for_reads\":1}", + want: "::::0:::0::{\"who_pays_for_reads\":1}:0", }, { name: "with Attributes.WhoPays = nil", fmd: storage.FileMetaData{ DirMetaData: storage.DirMetaData{}, }, - want: "::::0:::0::{}", + want: "::::0:::0::{}:0", }, } @@ -192,7 +192,7 @@ func TestFileMetaData_CalculateHash(t *testing.T) { WhoPaysForReads: common.WhoPaysOwner, }, }, - want: "f78718c8ad33d8b97fe902dabc36df401f82c88bde608ab85005d332ac24de43", + want: "2f3e218c5c0a759f8267f8b86d8438c5416c1c8499cd483f1fae428fe97eca2d", // want: "a9862b25db264157a540dc3ecf7aae331f377aeb3c2ef9c59951e1c8c3e3bc15", }, { @@ -202,12 +202,12 @@ func TestFileMetaData_CalculateHash(t *testing.T) { WhoPaysForReads: common.WhoPays3rdParty, }, }, - want: "2cc4e60833e5bf1e018910ce256d12374f1d8e87beade0f5c2a63770e6b8a445", + want: "b5553813cb69c0973b25ba19d9fc7748bd8eb4cc3ee7925f55750c96b08c5804", }, { name: "with Attributes.WhoPays = nil", fmd: storage.FileMetaData{}, - want: "f78718c8ad33d8b97fe902dabc36df401f82c88bde608ab85005d332ac24de43", + want: "2f3e218c5c0a759f8267f8b86d8438c5416c1c8499cd483f1fae428fe97eca2d", }, } @@ -269,7 +269,7 @@ func TestObjectPath_Parse(t *testing.T) { "list": []map[string]interface{}{ map[string]interface{}{ "path": "file.txt", - "hash": "87177591985fdf5c010d7781f0dc82b5d3c40b6bf8892b3c69000eb000f1e33a", + "hash": "7f0823cc8a8a4bcf22335e87e7e629eda04b6517b00c2db4049b125ffb634cb1", "type": "f", }, }, @@ -443,15 +443,15 @@ func TestObjectPath_VerifyPath(t *testing.T) { { name: "dir/file", objPath: &storage.ObjectPath{ - RootHash: "b25a7f67d4206d77fca08a48a06eba893c59077ea61435f71b31d098ea2f7991", + RootHash: "8bbf60d57fa672bbd6bcc49926148afd39930dff3a54722a905c3f12e150e29e", Path: map[string]interface{}{ "path": "dir1", - "hash": "b25a7f67d4206d77fca08a48a06eba893c59077ea61435f71b31d098ea2f7991", + "hash": "8bbf60d57fa672bbd6bcc49926148afd39930dff3a54722a905c3f12e150e29e", "type": "d", "list": []map[string]interface{}{ map[string]interface{}{ "path": "file.txt", - "hash": "87177591985fdf5c010d7781f0dc82b5d3c40b6bf8892b3c69000eb000f1e33a", + "hash": "7f0823cc8a8a4bcf22335e87e7e629eda04b6517b00c2db4049b125ffb634cb1", "type": "f", }, }, @@ -461,7 +461,7 @@ func TestObjectPath_VerifyPath(t *testing.T) { Type: storage.DIRECTORY, Name: "", Path: "dir1", - Hash: "b25a7f67d4206d77fca08a48a06eba893c59077ea61435f71b31d098ea2f7991", + Hash: "8bbf60d57fa672bbd6bcc49926148afd39930dff3a54722a905c3f12e150e29e", PathHash: "", NumBlocks: int64(0), AllocationID: "", @@ -472,7 +472,7 @@ func TestObjectPath_VerifyPath(t *testing.T) { Type: storage.FILE, Name: "", Path: "file.txt", - Hash: "87177591985fdf5c010d7781f0dc82b5d3c40b6bf8892b3c69000eb000f1e33a", + Hash: "7f0823cc8a8a4bcf22335e87e7e629eda04b6517b00c2db4049b125ffb634cb1", PathHash: "", NumBlocks: int64(0), AllocationID: "1", @@ -497,12 +497,12 @@ func TestObjectPath_VerifyPath(t *testing.T) { RootHash: "87177591985fdf5c010d7781f0dc82b5d3c40b6bf8892b3c69000eb000f1e33a", Path: map[string]interface{}{ "path": "dir1", - "hash": "b25a7f67d4206d77fca08a48a06eba893c59077ea61435f71b31d098ea2f7991", 
+ "hash": "8bbf60d57fa672bbd6bcc49926148afd39930dff3a54722a905c3f12e150e29e", "type": "d", "list": []map[string]interface{}{ map[string]interface{}{ "path": "file.txt", - "hash": "87177591985fdf5c010d7781f0dc82b5d3c40b6bf8892b3c69000eb000f1e33a", + "hash": "7f0823cc8a8a4bcf22335e87e7e629eda04b6517b00c2db4049b125ffb634cb1", "type": "f", }, }, @@ -512,7 +512,7 @@ func TestObjectPath_VerifyPath(t *testing.T) { Type: storage.DIRECTORY, Name: "", Path: "dir1", - Hash: "b25a7f67d4206d77fca08a48a06eba893c59077ea61435f71b31d098ea2f7991", + Hash: "8bbf60d57fa672bbd6bcc49926148afd39930dff3a54722a905c3f12e150e29e", PathHash: "", NumBlocks: int64(0), AllocationID: "", @@ -523,7 +523,7 @@ func TestObjectPath_VerifyPath(t *testing.T) { Type: storage.FILE, Name: "", Path: "file.txt", - Hash: "87177591985fdf5c010d7781f0dc82b5d3c40b6bf8892b3c69000eb000f1e33a", + Hash: "7f0823cc8a8a4bcf22335e87e7e629eda04b6517b00c2db4049b125ffb634cb1", PathHash: "", NumBlocks: int64(0), AllocationID: "1", @@ -547,20 +547,20 @@ func TestObjectPath_VerifyPath(t *testing.T) { { name: "dir/dir/file", objPath: &storage.ObjectPath{ - RootHash: "a02b02080606e78e165fe5a42f8b0087ff82617a1f9c26cc95e269fd653c5a72", + RootHash: "eb5167bd68f64f8b1dfae1f14230b43601bd1d42060dd1c2ec69a3674828018a", Path: map[string]interface{}{ "path": "dir1", - "hash": "a02b02080606e78e165fe5a42f8b0087ff82617a1f9c26cc95e269fd653c5a72", + "hash": "eb5167bd68f64f8b1dfae1f14230b43601bd1d42060dd1c2ec69a3674828018a", "type": "d", "list": []map[string]interface{}{ map[string]interface{}{ "path": "dir2", - "hash": "b25a7f67d4206d77fca08a48a06eba893c59077ea61435f71b31d098ea2f7991", + "hash": "8bbf60d57fa672bbd6bcc49926148afd39930dff3a54722a905c3f12e150e29e", "type": "d", "list": []map[string]interface{}{ map[string]interface{}{ "path": "file.txt", - "hash": "87177591985fdf5c010d7781f0dc82b5d3c40b6bf8892b3c69000eb000f1e33a", + "hash": "7f0823cc8a8a4bcf22335e87e7e629eda04b6517b00c2db4049b125ffb634cb1", "type": "f", }, }, @@ -572,7 +572,7 @@ func TestObjectPath_VerifyPath(t *testing.T) { Type: storage.DIRECTORY, Name: "", Path: "dir1", - Hash: "a02b02080606e78e165fe5a42f8b0087ff82617a1f9c26cc95e269fd653c5a72", + Hash: "eb5167bd68f64f8b1dfae1f14230b43601bd1d42060dd1c2ec69a3674828018a", PathHash: "", NumBlocks: int64(0), AllocationID: "", @@ -582,7 +582,7 @@ func TestObjectPath_VerifyPath(t *testing.T) { Type: storage.DIRECTORY, Name: "", Path: "dir2", - Hash: "b25a7f67d4206d77fca08a48a06eba893c59077ea61435f71b31d098ea2f7991", + Hash: "8bbf60d57fa672bbd6bcc49926148afd39930dff3a54722a905c3f12e150e29e", PathHash: "", NumBlocks: int64(0), AllocationID: "1", @@ -593,7 +593,7 @@ func TestObjectPath_VerifyPath(t *testing.T) { Type: storage.FILE, Name: "", Path: "file.txt", - Hash: "87177591985fdf5c010d7781f0dc82b5d3c40b6bf8892b3c69000eb000f1e33a", + Hash: "7f0823cc8a8a4bcf22335e87e7e629eda04b6517b00c2db4049b125ffb634cb1", PathHash: "", NumBlocks: int64(0), AllocationID: "1", diff --git a/code/go/0chain.net/validatorcore/storage/protocol.go b/code/go/0chain.net/validatorcore/storage/protocol.go index c58319887..192027a3d 100644 --- a/code/go/0chain.net/validatorcore/storage/protocol.go +++ b/code/go/0chain.net/validatorcore/storage/protocol.go @@ -13,14 +13,12 @@ import ( "github.com/0chain/blobber/code/go/0chain.net/core/transaction" "github.com/0chain/blobber/code/go/0chain.net/validatorcore/config" + "github.com/0chain/gosdk/constants" "github.com/0chain/gosdk/zcncore" "go.uber.org/zap" ) const CHUNK_SIZE = 64 * 1024 -const ALLOCATION_CONTEXT_KEY common.ContextKey = 
"allocation" -const CLIENT_CONTEXT_KEY common.ContextKey = "client" -const CLIENT_KEY_CONTEXT_KEY common.ContextKey = "client_key" type StorageNode struct { ID string `json:"id"` @@ -84,7 +82,7 @@ func (sp *ValidatorProtocolImpl) VerifyAllocationTransaction(ctx context.Context } func (sp *ValidatorProtocolImpl) VerifyChallengeTransaction(ctx context.Context, challengeRequest *ChallengeRequest) (*Challenge, error) { - blobberID := ctx.Value(CLIENT_CONTEXT_KEY).(string) + blobberID := ctx.Value(constants.ContextKeyClient).(string) if len(blobberID) == 0 { return nil, common.NewError("invalid_client", "Call from an invalid client") } diff --git a/config/0chain_blobber.yaml b/config/0chain_blobber.yaml index 155ca445f..2eb5bce8e 100755 --- a/config/0chain_blobber.yaml +++ b/config/0chain_blobber.yaml @@ -1,8 +1,8 @@ version: 1.0 logging: - level: "info" - console: false # printing log to console is only supported in development mode + level: "error" + console: true # printing log to console is only supported in development mode # for testing # 500 MB - 536870912 @@ -36,7 +36,7 @@ challenge_completion_time: 2m # duration to complete a challenge # a task and redeem tokens, it should be big enough read_lock_timeout: 1m write_lock_timeout: 1m -max_file_size: 10485760 #10MB +max_file_size: 104857600 #10MB # update_allocations_interval used to refresh known allocation objects from SC update_allocations_interval: 1m @@ -59,7 +59,7 @@ min_confirmation: 50 block_worker: http://198.18.0.98:9091 handlers: - rate_limit: 10 # 10 per second + rate_limit: 0 # 10 per second . it can't too small one if a large file is download with blocks server_chain: id: "0afc093ffb509f059c55478bc1a60351cef7b4e9c008a53a6cc8241ca8617dfe" @@ -73,7 +73,7 @@ contentref_cleaner: tolerance: 3600 openconnection_cleaner: frequency: 30 - tolerance: 3600 + tolerance: 3600 # 60 * 60 writemarker_redeem: frequency: 10 num_workers: 5 diff --git a/config/0chain_validator.yaml b/config/0chain_validator.yaml index 0173a38c7..af24b28ba 100644 --- a/config/0chain_validator.yaml +++ b/config/0chain_validator.yaml @@ -17,8 +17,8 @@ handlers: rate_limit: 10 # 10 per second logging: - level: "info" - console: false # printing log to console is only supported in development mode + level: "error" + console: true # printing log to console is only supported in development mode server_chain: id: "0afc093ffb509f059c55478bc1a60351cef7b4e9c008a53a6cc8241ca8617dfe" diff --git a/dev.local/README.md b/dev.local/README.md new file mode 100644 index 000000000..7662b4cf3 --- /dev/null +++ b/dev.local/README.md @@ -0,0 +1,86 @@ +# blobber development guide + + + + +## install `postgres:11` in docker as shared database for blobbers and validators + +``` +********************************************** + Welcome to blobber/validator development CLI +********************************************** + + +Please select which blobber/validator you will work on: +1) 1 +2) 2 +3) 3 +4) clean all +#? 1 + +********************************************** + Blobber/Validator 1 +********************************************** + +Please select what you will do: +1) install postgres +2) start blobber +3) start validator +#? 
1 +``` + + +It will check and install a `blobber_postgres` container as shared database for all blobbers and validators, and initialized database `blobber_meta$i` and user `blobber_user$i` + +## run local validator instance + +``` +********************************************** + Welcome to blobber/validator development CLI +********************************************** + + +Please select which blobber/validator you will work on: +1) 1 +2) 2 +3) 3 +4) clean all +#? 1 + +********************************************** + Blobber/Validator 1 +********************************************** + +Please select what you will do: +1) install postgres 3) start validator +2) start blobber 4) clean +#? 3 + +``` + + +## run local blobber instance + +``` +********************************************** + Welcome to blobber/validator development CLI +********************************************** + + +Please select which blobber/validator you will work on: +1) 1 +2) 2 +3) 3 +4) clean all +#? 1 + +********************************************** + Blobber/Validator 1 +********************************************** + +Please select what you will do: +1) install postgres 3) start validator +2) start blobber 4) clean +#? 2 + +``` \ No newline at end of file diff --git a/dev.local/cli.sh b/dev.local/cli.sh new file mode 100755 index 000000000..06216cecb --- /dev/null +++ b/dev.local/cli.sh @@ -0,0 +1,249 @@ +#!/bin/bash + +set -e + +root=$(pwd) +hostname=`ifconfig | grep "inet " | grep -Fv 127.0.0.1 | grep broadcast | awk '{print $2; exit}'` + + +ips=`ifconfig | grep "inet " | grep 198.18.0 | wc -l` + + +#fix docker network issue for Mac OS X platform +if [ "$(uname)" == "Darwin" ] && [ $ips != 31 ] +then + # 0dns + sudo ifconfig lo0 alias 198.18.0.98 + # sharders + sudo ifconfig lo0 alias 198.18.0.81 + sudo ifconfig lo0 alias 198.18.0.82 + sudo ifconfig lo0 alias 198.18.0.83 + sudo ifconfig lo0 alias 198.18.0.84 + sudo ifconfig lo0 alias 198.18.0.85 + sudo ifconfig lo0 alias 198.18.0.86 + sudo ifconfig lo0 alias 198.18.0.87 + sudo ifconfig lo0 alias 198.18.0.88 + # miners + sudo ifconfig lo0 alias 198.18.0.71 + sudo ifconfig lo0 alias 198.18.0.72 + sudo ifconfig lo0 alias 198.18.0.73 + sudo ifconfig lo0 alias 198.18.0.74 + sudo ifconfig lo0 alias 198.18.0.75 + sudo ifconfig lo0 alias 198.18.0.76 + sudo ifconfig lo0 alias 198.18.0.77 + sudo ifconfig lo0 alias 198.18.0.78 + # blobbers + sudo ifconfig lo0 alias 198.18.0.91 + sudo ifconfig lo0 alias 198.18.0.92 + sudo ifconfig lo0 alias 198.18.0.93 + sudo ifconfig lo0 alias 198.18.0.94 + sudo ifconfig lo0 alias 198.18.0.95 + sudo ifconfig lo0 alias 198.18.0.96 + sudo ifconfig lo0 alias 198.18.0.97 + # validators + sudo ifconfig lo0 alias 198.18.0.61 + sudo ifconfig lo0 alias 198.18.0.62 + sudo ifconfig lo0 alias 198.18.0.63 + sudo ifconfig lo0 alias 198.18.0.64 + sudo ifconfig lo0 alias 198.18.0.65 + sudo ifconfig lo0 alias 198.18.0.66 + sudo ifconfig lo0 alias 198.18.0.67 +fi + + +echo " +********************************************** + Welcome to blobber/validator development CLI +********************************************** + +" + +echo "Hostname: $hostname" + +set_hostname() { + + read -p "change hostname($hostname), please enter your hostname: " hostname + echo "" + echo "> hostname is updated to: $hostname" +} + +change_zcn() { + zcn=$(cat ../config/0chain_blobber.yaml | grep '^block_worker' | awk -F ' ' '{print $2}') + read -p "change zcn($zcn), please enter your zcn(leave blank for skip): " yourZCN + + if [ ! 
+ +## run local validator instance + +``` +********************************************** + Welcome to blobber/validator development CLI +********************************************** + + +Please select which blobber/validator you will work on: +1) 1 +2) 2 +3) 3 +4) clean all +#? 1 + +********************************************** + Blobber/Validator 1 +********************************************** + +Please select what you will do: +1) install postgres 3) start validator +2) start blobber 4) clean +#? 3 + +``` + + +## run local blobber instance + +``` +********************************************** + Welcome to blobber/validator development CLI +********************************************** + + +Please select which blobber/validator you will work on: +1) 1 +2) 2 +3) 3 +4) clean all +#? 1 + +********************************************** + Blobber/Validator 1 +********************************************** + +Please select what you will do: +1) install postgres 3) start validator +2) start blobber 4) clean +#? 2 + +``` \ No newline at end of file diff --git a/dev.local/cli.sh new file mode 100755 index 000000000..06216cecb --- /dev/null +++ b/dev.local/cli.sh @@ -0,0 +1,249 @@ +#!/bin/bash + +set -e + +root=$(pwd) +hostname=`ifconfig | grep "inet " | grep -Fv 127.0.0.1 | grep broadcast | awk '{print $2; exit}'` + + +ips=`ifconfig | grep "inet " | grep 198.18.0 | wc -l` + + +#fix docker network issue for Mac OS X platform +if [ "$(uname)" == "Darwin" ] && [ $ips != 31 ] +then + # 0dns + sudo ifconfig lo0 alias 198.18.0.98 + # sharders + sudo ifconfig lo0 alias 198.18.0.81 + sudo ifconfig lo0 alias 198.18.0.82 + sudo ifconfig lo0 alias 198.18.0.83 + sudo ifconfig lo0 alias 198.18.0.84 + sudo ifconfig lo0 alias 198.18.0.85 + sudo ifconfig lo0 alias 198.18.0.86 + sudo ifconfig lo0 alias 198.18.0.87 + sudo ifconfig lo0 alias 198.18.0.88 + # miners + sudo ifconfig lo0 alias 198.18.0.71 + sudo ifconfig lo0 alias 198.18.0.72 + sudo ifconfig lo0 alias 198.18.0.73 + sudo ifconfig lo0 alias 198.18.0.74 + sudo ifconfig lo0 alias 198.18.0.75 + sudo ifconfig lo0 alias 198.18.0.76 + sudo ifconfig lo0 alias 198.18.0.77 + sudo ifconfig lo0 alias 198.18.0.78 + # blobbers + sudo ifconfig lo0 alias 198.18.0.91 + sudo ifconfig lo0 alias 198.18.0.92 + sudo ifconfig lo0 alias 198.18.0.93 + sudo ifconfig lo0 alias 198.18.0.94 + sudo ifconfig lo0 alias 198.18.0.95 + sudo ifconfig lo0 alias 198.18.0.96 + sudo ifconfig lo0 alias 198.18.0.97 + # validators + sudo ifconfig lo0 alias 198.18.0.61 + sudo ifconfig lo0 alias 198.18.0.62 + sudo ifconfig lo0 alias 198.18.0.63 + sudo ifconfig lo0 alias 198.18.0.64 + sudo ifconfig lo0 alias 198.18.0.65 + sudo ifconfig lo0 alias 198.18.0.66 + sudo ifconfig lo0 alias 198.18.0.67 +fi + + +echo " +********************************************** + Welcome to blobber/validator development CLI +********************************************** + +" + +echo "Hostname: $hostname" + +set_hostname() { + + read -p "change hostname($hostname), please enter your hostname: " hostname + echo "" + echo "> hostname is updated to: $hostname" +} + +change_zcn() { + zcn=$(cat ../config/0chain_blobber.yaml | grep '^block_worker' | awk -F ' ' '{print $2}') + read -p "change zcn($zcn), please enter your zcn(leave blank for skip): " yourZCN + + if [ ! -z "$yourZCN" -a "$yourZCN" != " " ]; then + find ../config/ -name "0chain_blobber.yaml" -exec sed -i '' "s/block_worker/#block_worker/g" {} \; + find ../config/ -name "0chain_validator.yaml" -exec sed -i '' "s/block_worker/#block_worker/g" {} \; + echo "block_worker: $yourZCN" >> ../config/0chain_blobber.yaml + echo "block_worker: $yourZCN" >> ../config/0chain_validator.yaml + fi + zcn=$(cat ../config/0chain_blobber.yaml | grep '^block_worker' | awk -F ' ' '{print $2}') + echo "> zcn is updated to: $zcn" +} + +install_debuggers() { + [ -d ../.vscode ] || mkdir -p ../.vscode + sed "s/Hostname/$hostname/g" launch.json > ../.vscode/launch.json + echo "debuggers are installed" +} + +cleanAll() { + + cd $root + rm -rf ./data && echo "data is removed" +} + + +echo " " +echo "Please select which blobber/validator you will work on: " + +select i in "1" "2" "3" "clean all" "install debuggers on .vscode/launch.json" "set hostname" "change zcn"; do + case $i in + "1" ) break;; + "2" ) break;; + "3" ) break;; + "clean all" ) cleanAll ;; + "install debuggers on .vscode/launch.json" ) install_debuggers;; + "set hostname" ) set_hostname;; + "change zcn" ) change_zcn;; + esac +done + + +install_postgres () { + + echo Installing blobber_postgres in docker... + + [ ! "$(docker ps -a | grep blobber_postgres)" ] && docker run --name blobber_postgres --restart always -p 5432:5432 -e POSTGRES_PASSWORD=postgres -d postgres:11 + + [ -d "./data/blobber" ] || mkdir -p "./data/blobber" + + echo Initializing database + + [ -d "./data/blobber/sql" ] && rm -rf "./data/blobber/sql" + + cp -r ../sql "./data/blobber/" + cd "./data/blobber/sql" + + find . -name "*.sql" -exec sed -i '' "s/blobber_user/blobber_user/g" {} \; + find . -name "*.sql" -exec sed -i '' "s/blobber_meta/blobber_meta/g" {} \; + + + cd $root + [ -d "./data/blobber/bin" ] && rm -rf "./data/blobber/bin" + cp -r ../bin "./data/blobber/" + + + cd $root + + [ ! "$(docker ps -a | grep blobber_postgres_init)" ] && docker rm blobber_postgres_init --force + + + docker run --name blobber_postgres_init \ --link blobber_postgres:postgres \ -e POSTGRES_PORT=5432 \ -e POSTGRES_HOST=postgres \ -e POSTGRES_USER=postgres \ -e POSTGRES_PASSWORD=postgres \ -v $root/data/blobber/bin:/blobber/bin \ -v $root/data/blobber/sql:/blobber/sql \ postgres:11 bash /blobber/bin/postgres-entrypoint.sh + + docker rm blobber_postgres_init --force + +} + +prepareRuntime() { + + cd $root + [ -d ./data/blobber$i/config ] && rm -rf $root/data/blobber$i/config + cp -r ../config "./data/blobber$i/" + + cd ./data/blobber$i/config/ + + find . -name "*.yaml" -exec sed -i '' "s/blobber_user/blobber_user$i/g" {} \; + find . -name "*.yaml" -exec sed -i '' "s/blobber_meta/blobber_meta$i/g" {} \; + find . -name "*.yaml" -exec sed -i '' "s/postgres/127.0.0.1/g" {} \; + cd $root/data/blobber$i/ + + [ -d files ] || mkdir files + [ -d data ] || mkdir data + [ -d log ] || mkdir log +} + +start_blobber () { + + echo ">>>>>>>>>>>>>> Blobber $i <<<<<<<<<<<<<<<<" + + echo "[1/3] build blobber..." + cd ../code/go/0chain.net/blobber + go build -v -tags "bn256 development" -ldflags "-X 0chain.net/core/build.BuildTag=dev" -o $root/data/blobber$i/blobber . + + echo "[2/3] setup runtime..."
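+ # prepareRuntime (defined above) copies ../config into ./data/blobber$i/config and rewrites blobber_user/blobber_meta to the per-instance names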
+ prepareRuntime; + cd $root + port="505$i" + grpc_port="3150$i" + + keys_file="../docker.local/keys_config/b0bnode${i}_keys.txt" + minio_file="../docker.local/keys_config/minio_config.txt" + config_dir="./data/blobber$i/config" + files_dir="./data/blobber$i/files" + log_dir="./data/blobber$i/log" + db_dir="./data/blobber$i/data" + + echo "[3/3] run blobber..." + + + ./data/blobber$i/blobber --port $port --grpc_port $grpc_port --hostname $hostname --deployment_mode 0 --keys_file $keys_file --files_dir $files_dir --log_dir $log_dir --db_dir $db_dir --minio_file $minio_file --config_dir $config_dir +} + +start_validator () { + + echo ">>>>>>>>>>>>>> Validator $i <<<<<<<<<<<<<<<<" + + echo "[1/3] build validator..." + cd ../code/go/0chain.net/validator + go build -v -tags "bn256 development" -gcflags="-N -l" -ldflags "-X 0chain.net/core/build.BuildTag=dev" -o $root/data/blobber$i/validator . + + echo "[2/3] setup runtime..." + prepareRuntime; + + cd $root + port="506$i" + hostname="localhost" + keys_file="../docker.local/keys_config/b0bnode${i}_keys.txt" + config_dir="./data/blobber$i/config" + log_dir="./data/blobber$i/log" + + echo "[3/3] run validator..." + ./data/blobber$i/validator --port $port --hostname $hostname --deployment_mode 0 --keys_file $keys_file --log_dir $log_dir --config_dir $config_dir +} + +clean () { + echo "Cleaning blobber $i" + + cd $root + + rm -rf "./data/blobber$i" +} + + +echo " +********************************************** + Blobber/Validator $i +**********************************************" + +echo " " +echo "Please select what you will do: " + +select f in "install postgres" "start blobber" "start validator" "clean"; do + case $f in + "install postgres" ) install_postgres; break;; + "start blobber" ) start_blobber; break;; + "start validator" ) start_validator; break;; + "clean" ) clean; break;; + esac +done + + diff --git a/dev.local/launch.json new file mode 100644 index 000000000..364b437d3 --- /dev/null +++ b/dev.local/launch.json @@ -0,0 +1,107 @@ +{ + // Use IntelliSense to learn about possible attributes. + // Hover to view descriptions of existing attributes.
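+ // Note: "Hostname" below is a placeholder; the "install debuggers" option in dev.local/cli.sh copies this file to .vscode/launch.json with your machine's hostname substituted.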
+ // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387 + "version": "0.2.0", + "configurations": [ + { + "name": "debug:blobber1", + "type": "go", + "request": "launch", + "mode": "auto", + "program": "${workspaceFolder}/code/go/0chain.net/blobber", + "args": ["--port","5051", + "--grpc_port", "31501", + "--hostname","Hostname", + "--deployment_mode", "0" , + "--keys_file", "../../../../docker.local/keys_config/b0bnode1_keys.txt", + "--minio_file" ,"../../../../docker.local/keys_config/minio_config.txt", + + "--files_dir","../../../../dev.local/data/blobber1/files", + "--log_dir" ,"../../../../dev.local/data/blobber1/log", + "--db_dir", "../../../../dev.local/data/blobber1/data", + "--config_dir", "../../../../dev.local/data/blobber1/config" + ] + }, + { + "name": "debug:blobber2", + "type": "go", + "request": "launch", + "mode": "auto", + "program": "${workspaceFolder}/code/go/0chain.net/blobber", + "args": ["--port","5052", + "--grpc_port", "31502", + "--hostname","Hostname", + "--deployment_mode", "0" , + "--keys_file", "../../../../docker.local/keys_config/b0bnode2_keys.txt", + "--minio_file" ,"../../../../docker.local/keys_config/minio_config.txt", + + "--files_dir","../../../../dev.local/data/blobber2/files", + "--log_dir" ,"../../../../dev.local/data/blobber2/log", + "--db_dir", "../../../../dev.local/data/blobber2/data", + "--config_dir", "../../../../dev.local/data/blobber2/config" + ] + }, + { + "name": "debug:blobber3", + "type": "go", + "request": "launch", + "mode": "auto", + "program": "${workspaceFolder}/code/go/0chain.net/blobber", + "args": ["--port","5053", + "--grpc_port", "31503", + "--hostname","Hostname", + "--deployment_mode", "0" , + "--keys_file", "../../../../docker.local/keys_config/b0bnode3_keys.txt", + "--minio_file" ,"../../../../docker.local/keys_config/minio_config.txt", + + "--files_dir","../../../../dev.local/data/blobber3/files", + "--log_dir" ,"../../../../dev.local/data/blobber3/log", + "--db_dir", "../../../../dev.local/data/blobber3/data", + "--config_dir", "../../../../dev.local/data/blobber3/config" + ] + }, + { + "name": "debug:validator1", + "type": "go", + "request": "launch", + "mode": "auto", + "program": "${workspaceFolder}/code/go/0chain.net/validator", + "args": ["--port","5061", + "--hostname","Hostname", + "--deployment_mode", "0" , + "--keys_file", "../../../../docker.local/keys_config/b0bnode1_keys.txt", + "--log_dir" ,"../../../../dev.local/data/blobber1/log", + "--config_dir", "../../../../dev.local/data/blobber1/config" + ] + }, + { + "name": "debug:validator2", + "type": "go", + "request": "launch", + "mode": "auto", + "program": "${workspaceFolder}/code/go/0chain.net/validator", + "args": ["--port","5062", + "--hostname","Hostname", + "--deployment_mode", "0" , + "--keys_file", "../../../../docker.local/keys_config/b0bnode2_keys.txt", + "--log_dir" ,"../../../../dev.local/data/blobber2/log", + "--config_dir", "../../../../dev.local/data/blobber2/config" + ] + }, + { + "name": "debug:validator3", + "type": "go", + "request": "launch", + "mode": "auto", + "program": "${workspaceFolder}/code/go/0chain.net/validator", + "args": ["--port","5063", + "--hostname","Hostname", + "--deployment_mode", "0" , + "--keys_file", "../../../../docker.local/keys_config/b0bnode3_keys.txt", + "--log_dir" ,"../../../../dev.local/data/blobber3/log", + "--config_dir", "../../../../dev.local/data/blobber3/config" + ] + } + ] +} \ No newline at end of file diff --git a/docker.aws/build.blobber/Dockerfile 
b/docker.aws/build.blobber/Dockerfile index 59f46404f..2c61ea11e 100644 --- a/docker.aws/build.blobber/Dockerfile +++ b/docker.aws/build.blobber/Dockerfile @@ -1,5 +1,5 @@ ARG image_tag -FROM golang:1.14.9-alpine3.12 as blobber_build +FROM golang:1.17.1-alpine3.14 as blobber_build RUN apk add --update --no-cache build-base linux-headers git cmake bash perl grep @@ -35,7 +35,7 @@ ARG go_bls_tag RUN go build -v -tags ${go_build_mode} -tags ${go_bls_tag} -ldflags "-X 0chain.net/core/build.BuildTag=${image_tag}" # Copy the build artifact into a minimal runtime image: -FROM golang:1.11.4-alpine3.8 +FROM golang:1.17.1-alpine3.14 RUN apk add gmp gmp-dev openssl-dev COPY --from=blobber_build /usr/local/lib/libmcl*.so \ /usr/local/lib/libbls*.so \ diff --git a/docker.aws/build.validator/Dockerfile b/docker.aws/build.validator/Dockerfile index 4c2c0a15e..84c964349 100644 --- a/docker.aws/build.validator/Dockerfile +++ b/docker.aws/build.validator/Dockerfile @@ -1,5 +1,5 @@ ARG image_tag -FROM golang:1.14.9-alpine3.12 as validator_build +FROM golang:1.17.1-alpine3.14 as validator_build RUN apk add --update --no-cache build-base linux-headers git cmake bash perl grep @@ -35,7 +35,7 @@ ARG go_bls_tag RUN go build -v -tags ${go_build_mode} -tags ${go_bls_tag} -ldflags "-X 0chain.net/core/build.BuildTag=${image_tag}" # Copy the build artifact into a minimal runtime image: -FROM golang:1.11.4-alpine3.8 +FROM golang:1.17.1-alpine3.14 RUN apk add gmp gmp-dev openssl-dev COPY --from=validator_build /usr/local/lib/libmcl*.so \ /usr/local/lib/libbls*.so \ diff --git a/docker.local/Dockerfile b/docker.local/Dockerfile index ef7b728b0..193fff650 100644 --- a/docker.local/Dockerfile +++ b/docker.local/Dockerfile @@ -1,4 +1,6 @@ -FROM golang:1.14.9-alpine3.12 as blobber_build +FROM golang:1.17.1-alpine3.14 as blobber_build + +LABEL zchain="blobber" RUN apk add --update --no-cache build-base linux-headers git cmake bash perl grep @@ -37,7 +39,7 @@ ENV GIT_COMMIT=$GIT_COMMIT RUN go build -v -tags "bn256 development" -gcflags "all=-N -l" -ldflags "-X 0chain.net/core/build.BuildTag=$GIT_COMMIT" # Copy the build artifact into a minimal runtime image: -FROM golang:1.14.9-alpine3.12 +FROM golang:1.17.1-alpine3.14 RUN apk add gmp gmp-dev openssl-dev git COPY --from=blobber_build /usr/local/lib/libmcl*.so \ /usr/local/lib/libbls*.so \ diff --git a/docker.local/Dockerfile.dev b/docker.local/Dockerfile.dev index a2c38d790..83ca7019a 100644 --- a/docker.local/Dockerfile.dev +++ b/docker.local/Dockerfile.dev @@ -1,4 +1,4 @@ -FROM golang:1.14.9-alpine3.12 as blobber_build +FROM golang:1.17.1-alpine3.14 as blobber_build RUN apk add --update --no-cache build-base linux-headers git cmake bash perl grep @@ -38,7 +38,7 @@ ENV GIT_COMMIT=$GIT_COMMIT RUN go build -v -tags "bn256 development" -ldflags "-X 0chain.net/core/build.BuildTag=$GIT_COMMIT" # Copy the build artifact into a minimal runtime image: -FROM golang:1.14.9-alpine3.12 +FROM golang:1.17.1-alpine3.14 RUN apk add gmp gmp-dev openssl-dev COPY --from=blobber_build /usr/local/lib/libmcl*.so \ /usr/local/lib/libbls*.so \ diff --git a/docker.local/IntegrationTestsBlobberDockerfile b/docker.local/IntegrationTestsBlobberDockerfile index 77c37028f..e7d0c1bb2 100644 --- a/docker.local/IntegrationTestsBlobberDockerfile +++ b/docker.local/IntegrationTestsBlobberDockerfile @@ -1,4 +1,4 @@ -FROM golang:1.14.9-alpine3.12 as blobber_build +FROM golang:1.17.1-alpine3.14 as blobber_build RUN apk add --update --no-cache build-base linux-headers git cmake bash perl grep @@ -31,7 +31,7 @@ 
WORKDIR $SRC_DIR/go/0chain.net/blobber RUN go build -v -tags "bn256 development integration_tests" -ldflags "-X 0chain.net/core/build.BuildTag=$GIT_COMMIT" # Copy the build artifact into a minimal runtime image: -FROM golang:1.11.4-alpine3.8 +FROM golang:1.17.1-alpine3.14 RUN apk add gmp gmp-dev openssl-dev COPY --from=blobber_build /usr/local/lib/libmcl*.so \ /usr/local/lib/libbls*.so \ diff --git a/docker.local/ValidatorDockerfile b/docker.local/ValidatorDockerfile index cd5f594a1..f451e522f 100644 --- a/docker.local/ValidatorDockerfile +++ b/docker.local/ValidatorDockerfile @@ -1,4 +1,6 @@ -FROM golang:1.14.9-alpine3.12 as validator_build +FROM golang:1.17.1-alpine3.14 as validator_build + +LABEL zchain="validator" RUN apk add --update --no-cache build-base linux-headers git cmake bash perl grep @@ -35,7 +37,7 @@ WORKDIR $SRC_DIR/code/go/0chain.net/validator RUN go build -v -tags "bn256 development" -ldflags "-X 0chain.net/core/build.BuildTag=$GIT_COMMIT" # Copy the build artifact into a minimal runtime image: -FROM golang:1.11.4-alpine3.8 +FROM golang:1.17.1-alpine3.14 RUN apk add gmp gmp-dev openssl-dev COPY --from=validator_build /usr/local/lib/libmcl*.so \ /usr/local/lib/libbls*.so \ diff --git a/docker.local/ValidatorDockerfile.dev b/docker.local/ValidatorDockerfile.dev index 1841a035f..a574c9841 100644 --- a/docker.local/ValidatorDockerfile.dev +++ b/docker.local/ValidatorDockerfile.dev @@ -1,4 +1,4 @@ -FROM golang:1.14.9-alpine3.12 as validator_build +FROM golang:1.17.1-alpine3.14 as validator_build RUN apk add --update --no-cache build-base linux-headers git cmake bash perl grep @@ -36,7 +36,7 @@ WORKDIR $SRC_DIR/go/0chain.net/validator RUN go build -v -tags "bn256 development" -ldflags "-X 0chain.net/core/build.BuildTag=$GIT_COMMIT" # Copy the build artifact into a minimal runtime image: -FROM golang:1.11.4-alpine3.8 +FROM golang:1.17.1-alpine3.14 RUN apk add gmp gmp-dev openssl-dev COPY --from=validator_build /usr/local/lib/libmcl*.so \ /usr/local/lib/libbls*.so \ diff --git a/docker.local/base.Dockerfile b/docker.local/base.Dockerfile new file mode 100644 index 000000000..f39a5860c --- /dev/null +++ b/docker.local/base.Dockerfile @@ -0,0 +1,24 @@ +FROM golang:1.17.1-alpine3.14 as blobber_base + +LABEL zchain="blobber" + +RUN sed -i 's/dl-cdn.alpinelinux.org/mirrors.aliyun.com/g' /etc/apk/repositories + +RUN apk add --update --no-cache build-base linux-headers git cmake bash perl grep + +# Install Herumi's cryptography +RUN apk add gmp gmp-dev openssl-dev && \ + cd /tmp && \ + wget -O - https://github.com/herumi/mcl/archive/master.tar.gz | tar xz && \ + mv mcl* mcl && \ + make -C mcl -j $(nproc) lib/libmclbn256.so install && \ + cp mcl/lib/libmclbn256.so /usr/local/lib && \ + rm -R /tmp/mcl + +RUN git clone https://github.com/herumi/bls /tmp/bls && \ + cd /tmp/bls && \ + git submodule init && \ + git submodule update && \ + make -j $(nproc) install && \ + cd - && \ + rm -R /tmp/bls diff --git a/docker.local/bin/build.base.sh b/docker.local/bin/build.base.sh new file mode 100755 index 000000000..252909943 --- /dev/null +++ b/docker.local/bin/build.base.sh @@ -0,0 +1,8 @@ +#!/bin/sh +set -e + +GIT_COMMIT=$(git rev-list -1 HEAD) +echo $GIT_COMMIT + + +docker build --build-arg GIT_COMMIT=$GIT_COMMIT -f docker.local/base.Dockerfile . 
-t blobber_base \ No newline at end of file diff --git a/docker.local/bin/build.blobber.sh b/docker.local/bin/build.blobber.sh index 4cd4fda82..64daed5d1 100755 --- a/docker.local/bin/build.blobber.sh +++ b/docker.local/bin/build.blobber.sh @@ -17,12 +17,9 @@ do esac done -docker $cmd --build-arg GIT_COMMIT=$GIT_COMMIT -f docker.local/ValidatorDockerfile . -t validator -docker $cmd --build-arg GIT_COMMIT=$GIT_COMMIT -f docker.local/Dockerfile . -t blobber +# [ -d ./gosdk ] && rm -rf gosdk +# cp -r ../gosdk ./ -for i in $(seq 1 6); -do - BLOBBER=$i docker-compose -p blobber$i -f docker.local/docker-compose.yml build --force-rm -done -docker.local/bin/sync_clock.sh +docker $cmd --build-arg GIT_COMMIT=$GIT_COMMIT -f docker.local/blobber.Dockerfile . -t blobber + diff --git a/docker.local/bin/build.validator.sh b/docker.local/bin/build.validator.sh new file mode 100755 index 000000000..162828098 --- /dev/null +++ b/docker.local/bin/build.validator.sh @@ -0,0 +1,24 @@ +#!/bin/sh +set -e + +GIT_COMMIT=$(git rev-list -1 HEAD) +echo $GIT_COMMIT + +cmd="build" + +for arg in "$@" +do + case $arg in + -m1|--m1|m1) + echo "The build will be performed for Apple M1 chip" + cmd="buildx build --platform linux/amd64" + shift + ;; + esac +done + +# [ -d ./gosdk ] && rm -rf gosdk +# cp -r ../gosdk ./ + +docker $cmd --build-arg GIT_COMMIT=$GIT_COMMIT -f docker.local/validator.Dockerfile . -t validator + diff --git a/docker.local/blobber.Dockerfile b/docker.local/blobber.Dockerfile new file mode 100644 index 000000000..528599e4a --- /dev/null +++ b/docker.local/blobber.Dockerfile @@ -0,0 +1,35 @@ +FROM blobber_base as blobber_build + +LABEL zchain="blobber" + + +ENV SRC_DIR=/0chain +ENV GO111MODULE=on +#ENV GOPROXY=https://goproxy.cn,direct + +# Download the dependencies: +# Will be cached if we don't change mod/sum files +COPY . 
$SRC_DIR +# COPY ./gosdk /gosdk + +RUN cd $SRC_DIR/ && go mod download + + +WORKDIR $SRC_DIR/code/go/0chain.net/blobber + +ARG GIT_COMMIT +ENV GIT_COMMIT=$GIT_COMMIT +RUN go build -v -tags "bn256 development" -ldflags "-X 0chain.net/core/build.BuildTag=$GIT_COMMIT" + +# Copy the build artifact into a minimal runtime image: +FROM golang:1.17.1-alpine3.14 +RUN apk add gmp gmp-dev openssl-dev git +COPY --from=blobber_build /usr/local/lib/libmcl*.so \ + /usr/local/lib/libbls*.so \ + /usr/local/lib/ + + +ENV APP_DIR=/blobber +WORKDIR $APP_DIR +COPY --from=blobber_build /0chain/code/go/0chain.net/blobber/blobber $APP_DIR/bin/blobber + diff --git a/docker.local/docker-compose.yml b/docker.local/docker-compose.yml index be11ce879..baa238b28 100644 --- a/docker.local/docker-compose.yml +++ b/docker.local/docker-compose.yml @@ -8,6 +8,8 @@ services: default: ports: - "543${BLOBBER}:5432" + labels: + zchain: "postgres" postgres-post: image: postgres:11 environment: @@ -17,6 +19,8 @@ services: volumes: - ../bin:/blobber/bin - ../sql:/blobber/sql + labels: + zchain: "postgres-post" command: bash /blobber/bin/postgres-entrypoint.sh links: - postgres:postgres @@ -35,6 +39,8 @@ services: - ./keys_config:/blobber/keysconfig ports: - "506${BLOBBER}:506${BLOBBER}" + labels: + zchain: "validator" command: ./bin/validator --port 506${BLOBBER} --hostname localhost --deployment_mode 0 --keys_file keysconfig/bnode${BLOBBER}_keys.txt --log_dir /blobber/log networks: default: @@ -56,6 +62,8 @@ services: ports: - "505${BLOBBER}:505${BLOBBER}" - "703${BLOBBER}:703${BLOBBER}" + labels: + zchain: "blobber" command: ./bin/blobber --port 505${BLOBBER} --grpc_port 703${BLOBBER} --hostname localhost --deployment_mode 0 --keys_file keysconfig/bnode${BLOBBER}_keys.txt --files_dir /blobber/files --log_dir /blobber/log --db_dir /blobber/data --minio_file keys_config/minio_config.txt networks: default: diff --git a/docker.local/validator.Dockerfile b/docker.local/validator.Dockerfile new file mode 100644 index 000000000..58cbb9c4a --- /dev/null +++ b/docker.local/validator.Dockerfile @@ -0,0 +1,30 @@ +FROM blobber_base as validator_build + +LABEL zchain="validator" + + +ENV SRC_DIR=/blobber +ENV GO111MODULE=on +#ENV GOPROXY=https://goproxy.cn,direct + + +# Download the dependencies: +# Will be cached if we don't change mod/sum files +COPY . 
$SRC_DIR + # COPY ./gosdk /gosdk + +RUN cd $SRC_DIR/ && go mod download + +WORKDIR $SRC_DIR/code/go/0chain.net/validator + +ARG GIT_COMMIT +ENV GIT_COMMIT=$GIT_COMMIT +RUN go build -v -tags "bn256 development" -ldflags "-X 0chain.net/core/build.BuildTag=$GIT_COMMIT" + +# Copy the build artifact into a minimal runtime image: +FROM golang:1.17.1-alpine3.14 +RUN apk add gmp gmp-dev openssl-dev +COPY --from=validator_build /usr/local/lib/libmcl*.so \ + /usr/local/lib/libbls*.so \ + /usr/local/lib/ +ENV APP_DIR=/blobber +WORKDIR $APP_DIR +COPY --from=validator_build $APP_DIR/code/go/0chain.net/validator/validator $APP_DIR/bin/validator \ No newline at end of file
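The new base/blobber/validator Dockerfiles above are driven by the scripts under docker.local/bin; a local image build now looks roughly like this (run from the repository root, with Docker installed):

```
./docker.local/bin/build.base.sh        # builds the shared blobber_base image (mcl/bls toolchain)
./docker.local/bin/build.blobber.sh     # builds the blobber image from docker.local/blobber.Dockerfile
./docker.local/bin/build.validator.sh   # builds the validator image from docker.local/validator.Dockerfile
```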
diff --git a/docs/src/stream_hash.puml new file mode 100644 index 000000000..4a193df5a --- /dev/null +++ b/docs/src/stream_hash.puml @@ -0,0 +1,157 @@ +@startuml Stream Hash (Current) + +actor Client + +title Stream Hash (Current) +Client -> Blockchain : Make allocation request (erasure code, size, expiry) +Blockchain -> Blockchain : Assigns the blobbers \ncreates the read/write pools for allocation +Blockchain --> Client : Allocation status, if success, list of blobbers selected + + +loop till end +group upload chunk [ read bytes with (DataShards * chunk size)/remaining size ] + Client -> "FileHasher:SHA1" : Write bytes - original bytes have to be saved in memory (unencoded, unencrypted) + note right + 1. it prevents stream upload + 2. bytes copy +1 + end note + group sharding [ 1.. DataShards+ParityShards ] + Client -> "ChunkHasher:SHA1" : Write bytes - bytes can be released once they are uploaded (encoded, encrypted) + Client -> "ShardHasher:SHA1" : Write bytes - bytes have to be saved in memory (encoded, encrypted) + note right + 1. it prevents stream upload + 2. bytes copy +1 + end note + + group outsourcing attack protection [ 1..1024 ] + Client -> "SivaHasher:Merkle+SHA3" : Write bytes to leaf node - MerkleLeaves[i/64].Write(bytes[i:i+64]) + note right + 1. it prevents stream upload, because each leaf is hashed with sha3.New256 + 2. bytes copy +1 + end note + end + + alt first chunk + "ChunkHasher:SHA1" -> Client: compute chunk bytes hash + Client -> Blobber : PATCH request - Upload the shard bytes (path, metadata, connection id) and thumbnail shard bytes if any + + else final chunk + "ChunkHasher:SHA1" -> Client: compute chunk bytes hash + "FileHasher:SHA1" -> Client : compute sha1 hash for full original file content + note right + Memory can be released now + end note + "ShardHasher:SHA1" -> Client : compute sha1 hash for all shard bytes that have been uploaded to this blobber + note right + Memory can be released now + end note + "SivaHasher:Merkle+SHA3" -> Client : compute merkle root with MerkleLeaves + note right + Memory can be released now + end note + Client -> Blobber : PATCH request - Upload the shard bytes (path, metadata, connection id) and final flag + else streaming chunk + "ChunkHasher:SHA1" -> Client: compute chunk bytes hash + Client -> Blobber : PATCH request - Upload the shard bytes (path, metadata, connection id) + end + end + Blobber --> Client : Upload successful +end + +end +Client -> Blobber : commit the connection and pass the writemarker (allocation root, prev allocation root, upload size) +Blobber -> Blobber : validates the write marker, commits any delete tokens +Blobber --> Client : commit success / failure + + + +Blobber -> Blockchain : redeem the write marker in the order received +note right +async redemption +not blocking uploads +end note + +Blockchain -> Blockchain : Move tokens from write pool to challenge pool + +@enduml + + +@startuml Stream Hash (New) + +actor Client + +title Stream Hash (New) +Client -> Blockchain : Make allocation request (erasure code, size, expiry) +Blockchain -> Blockchain : Assigns the blobbers \ncreates the read/write pools for allocation +Blockchain --> Client : Allocation status, if success, list of blobbers selected + + +loop till end +group upload chunk [ read bytes with (DataShards * chunk size)/remaining size ] + Client -> "FileHasher:StreamMerkle+SHA1" : Compute chunk bytes instantly with sha1 as a merkle leaf - bytes can be released after hashing (unencoded, unencrypted) + note right + The merkle tree is auto-balanced as much as possible when a new leaf is pushed. Memory usage is kept as low as possible. The tree is stateful and easy to save and reload. + end note + group sharding [ 1.. DataShards+ParityShards ] + + group outsourcing attack protection [ 1..1024 ] + Client -> "ShardHasher:SivaMerkle+(StreamMerkle+SHA1)" : Compute 1/1024 of the bytes instantly, and append to the leaf's StreamMerkleHasher as the leaf's leaf node + note right + SivaMerkle+(StreamMerkle+SHA1) is a merkle tree with 1024 leaves.
A leaf is itself a StreamMerkleHasher tree with many leaves, each hashed with sha1 + end note + end + + alt first chunk + "ChunkHasher:SHA1" -> Client: compute chunk bytes hash + Client -> Blobber : PATCH request - Upload the shard bytes (path, metadata, connection id) and thumbnail shard bytes if any + else final chunk + "ChunkHasher:SHA1" -> Client: compute chunk bytes hash + "FileHasher:StreamMerkle+SHA1" -> Client : compute merkle root with states + note right + States can be released now + end note + + "ShardHasher:SivaMerkle+(StreamMerkle+SHA1)" -> Client : compute each of the 1024 leaves' merkle roots first, then compute the top merkle root from those 1024 merkle roots + note right + States can be released now + end note + Client -> Blobber : PATCH request - Upload the shard bytes (path, metadata, connection id) and final flag + else streaming chunk + "ChunkHasher:SHA1" -> Client: compute chunk bytes hash + Client -> Blobber : PATCH request - Upload the shard bytes (path, metadata, connection id) + end + end + Blobber --> Client : Upload successful +end + +end +Client -> Blobber : commit the connection and pass the writemarker (allocation root, prev allocation root, upload size) +Blobber -> Blobber : validates the write marker, commits any delete tokens +Blobber --> Client : commit success / failure + + + +Blobber -> Blockchain : redeem the write marker in the order received +note right +async redemption +not blocking uploads +end note + +Blockchain -> Blockchain : Move tokens from write pool to challenge pool + +@enduml + +@startuml StandardMerkleHasher + +title StandardMerkleHasher + + +@enduml + + +@startuml StreamMerkleHasher + +title StreamMerkleHasher + + +@enduml \ No newline at end of file diff --git a/docs/src/upload_flow.puml new file mode 100644 index 000000000..f22821f2b --- /dev/null +++ b/docs/src/upload_flow.puml @@ -0,0 +1,36 @@ +@startuml Upload / Delete Flow + +actor Client + +title Upload/Delete flow +Client -> Blockchain : Make allocation request (erasure code, size, expiry) +Blockchain -> Blockchain : Assigns the blobbers \ncreates the read/write pools for allocation +Blockchain --> Client : Allocation status, if success, list of blobbers selected + + +loop till done +group upload + Client -> Blobber : POST request - Upload the file (path, metadata, connection id) + Blobber --> Client : Upload successful +end +group delete + Client -> Blobber : DELETE request - Delete the file and pass delete token (path hash, file hash from list api) + Blobber -> Blobber : Validates the delete token and stores it with New state + Blobber --> Client : Delete successful +end +end +Client -> Blobber : commit the connection and pass the writemarker (allocation root, prev allocation root, upload size) +Blobber -> Blobber : validates the write marker, commits any delete tokens +Blobber --> Client : commit success / failure + + + +Blobber -> Blockchain : redeem the write marker in the order received +note right +async redemption +not blocking uploads +end note + +Blockchain -> Blockchain : Move tokens from write pool to challenge pool + +@enduml \ No newline at end of file diff --git a/go.mod index 1744366f0..751f34318 100644 --- a/go.mod +++ b/go.mod @@ -1,30 +1,31 @@ module github.com/0chain/blobber +go 1.16 + require ( - github.com/0chain/errors v1.0.2 - github.com/0chain/gosdk v1.2.85-0.20210905084101-4d9d75eb73f2 + github.com/0chain/errors v1.0.3 + github.com/0chain/gosdk v1.2.88 github.com/DATA-DOG/go-sqlmock v1.5.0 github.com/desertbit/timer
v0.0.0-20180107155436-c41aec40b27f // indirect github.com/didip/tollbooth v4.0.2+incompatible github.com/go-ini/ini v1.55.0 // indirect - github.com/gorilla/handlers v1.4.2 - github.com/gorilla/mux v1.7.3 + github.com/gorilla/handlers v1.5.1 + github.com/gorilla/mux v1.8.0 github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 github.com/grpc-ecosystem/grpc-gateway/v2 v2.3.0 - github.com/herumi/bls-go-binary v0.0.0-20191119080710-898950e1a520 + github.com/herumi/bls-go-binary v1.0.1-0.20210830012634-a8e769d3b872 github.com/improbable-eng/grpc-web v0.14.0 github.com/jackc/pgproto3/v2 v2.0.4 // indirect github.com/koding/cache v0.0.0-20161222233015-e8a81b0b3f20 github.com/minio/minio-go v6.0.14+incompatible github.com/mitchellh/mapstructure v1.3.1 - github.com/patrickmn/go-cache v2.1.0+incompatible // indirect github.com/remeh/sizedwaitgroup v0.0.0-20180822144253-5e7302b12cce github.com/rs/cors v1.8.0 // indirect github.com/selvatico/go-mocket v1.0.7 github.com/spf13/viper v1.7.0 github.com/stretchr/testify v1.7.0 go.uber.org/ratelimit v0.2.0 - go.uber.org/zap v1.16.0 + go.uber.org/zap v1.19.0 golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2 google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb google.golang.org/grpc v1.36.0 @@ -34,12 +35,9 @@ require ( gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce // indirect gopkg.in/natefinch/lumberjack.v2 v2.0.0 gorm.io/datatypes v0.0.0-20200806042100-bc394008dd0d - gorm.io/driver/mysql v0.3.1 gorm.io/driver/postgres v1.0.0 gorm.io/gorm v1.20.4 nhooyr.io/websocket v1.8.7 // indirect ) -go 1.13 - -//replace github.com/0chain/gosdk => ../gosdk +// replace github.com/0chain/gosdk => ../gosdk diff --git a/go.sum b/go.sum index 26206f411..6e1914eef 100644 --- a/go.sum +++ b/go.sum @@ -36,10 +36,11 @@ cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RX cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/0chain/errors v1.0.2 h1:IIUMeh/qFlqDcyHesjU92CpRMVz9dIQWAtZooqrYinA= github.com/0chain/errors v1.0.2/go.mod h1:5t76jLb56TKfg/K2VD+eUMmNZJ42QsIRI8KzWuztwU4= -github.com/0chain/gosdk v1.2.85-0.20210905084101-4d9d75eb73f2 h1:R89Ttt4+Qro5prWnA4YyuHIW3QJRdgIYD90BEG5Iaq4= -github.com/0chain/gosdk v1.2.85-0.20210905084101-4d9d75eb73f2/go.mod h1:Bl/wsHdlktgXybdzkHPfaf6ATe7mPmew/xF3ki2gskQ= +github.com/0chain/errors v1.0.3 h1:QQZPFxTfnMcRdt32DXbzRQIfGWmBsKoEdszKQDb0rRM= +github.com/0chain/errors v1.0.3/go.mod h1:xymD6nVgrbgttWwkpSCfLLEJbFO6iHGQwk/yeSuYkIc= +github.com/0chain/gosdk v1.2.88 h1:rllzNvDvLP9iDXI6jYT9v3aBgeucGvb1uOpA9Af2AjQ= +github.com/0chain/gosdk v1.2.88/go.mod h1:JtvcqYYWRdOVFm0pvjdKO5pCiItc/Is2f5wTuuA8F4M= github.com/Azure/azure-pipeline-go v0.2.1/go.mod h1:UGSo8XybXnIGZ3epmeBw7Jdz+HiUVpqIlpz/HKHylF4= github.com/Azure/azure-pipeline-go v0.2.2/go.mod h1:4rQ/NZncSvGqNkkOsNpOU1tgoNuIlp9AfUH5G1tvCHc= github.com/Azure/azure-storage-blob-go v0.7.0/go.mod h1:f9YQKtsG1nMisotuTPpO0tjNuEjKRYAcJU8/ydDI++4= @@ -59,17 +60,15 @@ github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60= github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= 
-github.com/DataDog/zstd v1.3.6-0.20190409195224-796139022798/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/PuerkitoBio/goquery v1.5.1/go.mod h1:GsLWisAFVj4WgDibEWF4pvYnkVQBpKBKeU+7zCJoLcc= github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= -github.com/Shopify/sarama v1.28.0/go.mod h1:j/2xTrU39dlzBmsxF1eQ2/DdWrxyBCl6pzz7a81o/ZY= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 h1:fLjPD/aNc3UIOA6tDi6QXUemppXK3P9BI7mr2hd6gx8= github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= -github.com/VictoriaMetrics/fastcache v1.5.7 h1:4y6y0G8PRzszQUYIQHHssv/jgPHAb5qQuuDNdCbyAgw= -github.com/VictoriaMetrics/fastcache v1.5.7/go.mod h1:ptDBkNMQI4RtmVo8VS/XwRY6RoTu1dAWCbrk+6WsEM8= +github.com/VictoriaMetrics/fastcache v1.6.0 h1:C/3Oi3EiBCqufydp1neRZkqcwmEiuRT9c3fqvvgKm5o= +github.com/VictoriaMetrics/fastcache v1.6.0/go.mod h1:0qHz5QP0GMX4pfmMA/zt5RgfNuXJrTP0zS7DqpHGGTw= github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= @@ -78,9 +77,7 @@ github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuy github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM= -github.com/allegro/bigcache v1.2.1 h1:hg1sY1raCwic3Vnsvje6TT7/pnZba83LeFck5NrFKSc= github.com/allegro/bigcache v1.2.1/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM= github.com/andres-erbsen/clock v0.0.0-20160526145045-9e14626cd129 h1:MzBOUgng9orim59UnfUTLRjMpd09C5uEVQ6RPGeCaVI= github.com/andres-erbsen/clock v0.0.0-20160526145045-9e14626cd129/go.mod h1:rFgpPQZYZ8vdbc+48xibu8ALc3yeyd64IhHS+PU6Yyg= @@ -90,17 +87,12 @@ github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kd github.com/apache/arrow/go/arrow v0.0.0-20191024131854-af6fa24be0db/go.mod h1:VTxUBvSJ3s3eHAg65PNgrsn5BtqCRPdmyXh6rAfdxN0= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= -github.com/aristanetworks/fsnotify v1.4.2/go.mod h1:D/rtu7LpjYM8tRJphJ0hUBYpjai8SfX+aSNsWDTq/Ks= -github.com/aristanetworks/glog v0.0.0-20191112221043-67e8567f59f3/go.mod h1:KASm+qXFKs/xjSoWn30NrWBBvdTTQq+UjkhjEJHfSFA= -github.com/aristanetworks/goarista v0.0.0-20170210015632-ea17b1a17847/go.mod h1:D/tb0zPVXnP7fmsLZjtdUhSsumbK/ij54UXjjVgMGxQ= -github.com/aristanetworks/goarista 
v0.0.0-20210308203447-b196d8410f1d/go.mod h1:drswc1gdKErwWsW+gV2R5ELcuHehg5pZD2tat4B65Ik= -github.com/aristanetworks/splunk-hec-go v0.3.3/go.mod h1:1VHO9r17b0K7WmOlLb9nTk/2YanvOEnLMUgsFrxBROc= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= -github.com/aws/aws-sdk-go v1.25.48/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= github.com/aws/aws-sdk-go-v2 v1.2.0/go.mod h1:zEQs02YRBw1DjK0PoJv3ygDYOFTre1ejlJWl8FwAuQo= @@ -112,6 +104,8 @@ github.com/aws/aws-sdk-go-v2/service/route53 v1.1.1/go.mod h1:rLiOUrPLW/Er5kRcQ7 github.com/aws/aws-sdk-go-v2/service/sso v1.1.1/go.mod h1:SuZJxklHxLAXgLTc1iFXbEWkXs7QRTQpCLGaKIprQW0= github.com/aws/aws-sdk-go-v2/service/sts v1.1.1/go.mod h1:Wi0EBZwiz/K44YliU0EKxqTCJGUfYTWXrrBwkq736bM= github.com/aws/smithy-go v1.1.0/go.mod h1:EzMw8dbp/YJL4A5/sbhGddag+NPT7q084agLbB9LgIw= +github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= +github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= @@ -120,7 +114,6 @@ github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kB github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40/go.mod h1:8rLXio+WjiTceGBHIoTvn60HIbs7Hm7bcHjyrSqYB9c= github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= -github.com/btcsuite/btcd v0.0.0-20171128150713-2e60448ffcc6/go.mod h1:Dmm/EzmjnCiweXmzRIAiUWCInVmPgjkzgv5k4tVyXiQ= github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= github.com/btcsuite/btcd v0.21.0-beta h1:At9hIZdJW0s9E/fAz28nrz6AmcNlSVucCH796ZteX1M= github.com/btcsuite/btcd v0.21.0-beta/go.mod h1:ZSWyehm27aAuS9bvkATT+Xte3hjHZ+MRgMY/8NJ7K94= @@ -140,9 +133,7 @@ github.com/bufbuild/buf v0.37.0/go.mod h1:lQ1m2HkIaGOFba6w/aC3KYBHhKEOESP3gaAEpS github.com/c-bata/go-prompt v0.2.2/go.mod h1:VzqtzE2ksDBcdln8G7mk2RX9QyGjH+OVqOCSiVIqS34= github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= -github.com/cenkalti/backoff/v4 v4.0.0/go.mod h1:eEew/i+1Q6OrCDZh3WiXYv3+nJwBASZ8Bog/87DQnVg= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/cp v0.1.0 
h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk= github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= @@ -153,22 +144,19 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5P github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cloudflare/cloudflare-go v0.10.2-0.20190916151808-a80f83b9add9/go.mod h1:1MxXX1Ux4x6mqPmjkUgTP1CdXIBXKX7T+Jk9Gxrmx+U= github.com/cloudflare/cloudflare-go v0.14.0/go.mod h1:EnwdgGMaFOruiPZRFSgn+TsQ3hQ7C/YWzIGLeu5c304= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I= github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= -github.com/consensys/bavard v0.1.8-0.20210105233146-c16790d2aa8b/go.mod h1:Bpd0/3mZuaj6Sj+PqrmIquiOKy397AKGThQPaGzNXAQ= github.com/consensys/bavard v0.1.8-0.20210406032232-f3452dc9b572/go.mod h1:Bpd0/3mZuaj6Sj+PqrmIquiOKy397AKGThQPaGzNXAQ= github.com/consensys/gnark-crypto v0.4.1-0.20210426202927-39ac3d4b3f1f/go.mod h1:815PAHg3wvysy0SyIqanF8gZ0Y1wjk/hrDHD/iT88+Q= -github.com/consensys/goff v0.3.10/go.mod h1:xTldOBEHmFiYS0gPXd3NsaEqZWlnmeWcRLWgD3ba3xc= -github.com/consensys/gurvy v0.3.8/go.mod h1:sN75xnsiD593XnhbhvG2PkOy194pZBzqShWF/kwuW/g= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= @@ -176,10 +164,12 @@ github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7 github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/cpuguy83/go-md2man/v2 
v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4= github.com/dave/jennifer v1.2.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg= github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -188,12 +178,17 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea h1:j4317fAZh7X6GqbFowYdYdI0L9bwxL07jyPZIdepyZ0= github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea/go.mod h1:93vsz/8Wt4joVM7c2AVqh+YRMiUSc14yDtF28KmMOgQ= github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218= +github.com/deepmap/oapi-codegen v1.6.0/go.mod h1:ryDa9AgbELGeB+YEXE1dR53yAjHwFvE9iAUlWl9Al3M= +github.com/deepmap/oapi-codegen v1.8.2/go.mod h1:YLgSKSDv/bZQB7N4ws6luhozi3cEdRktEqrX88CvjIw= github.com/denisenkom/go-mssqldb v0.0.0-20200428022330-06a60b6afbbc h1:VRRKCwnzqk8QCaRC4os14xoKDdbHqqlJtJA0oc1ZAjg= github.com/denisenkom/go-mssqldb v0.0.0-20200428022330-06a60b6afbbc/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f h1:U5y3Y5UE0w7amNe7Z5G/twsBW0KEalRQXZzf8ufSh9I= github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f/go.mod h1:xH/i4TFMt8koVQZ6WFms69WAsDWr2XsYL3Hkl7jkoLE= +github.com/dgraph-io/badger/v3 v3.2103.1/go.mod h1:dULbq6ehJ5K0cGW/1TQ9iSfUk0gbSiToDWmWmTsJ53E= +github.com/dgraph-io/ristretto v0.1.0/go.mod h1:fux0lOrBhrVCJd3lcTHsIJhq1T2rokOu6v9Vcb3Q9ug= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMaSuZ+SZcx/wljOQKvp5srsbCiKDEb6K2wC4+PiBmQ= +github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/didip/tollbooth v4.0.2+incompatible h1:fVSa33JzSz0hoh2NxpwZtksAzAgd7zjmGO20HCZtF4M= github.com/didip/tollbooth v4.0.2+incompatible/go.mod h1:A9b0665CE6l1KmzpDws2++elm/CsuWBMa5Jv4WY0PEY= @@ -201,43 +196,36 @@ github.com/dlclark/regexp2 v1.2.0/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55k github.com/docker/docker v1.4.2-0.20180625184442-8e610b2b55bf/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/dop251/goja v0.0.0-20200721192441-a695b0cdd498/go.mod h1:Mw6PkjjMXWbTj+nnj4s3QPXq1jaT0s5pC0iFD4+BOAA= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/dvyukov/go-fuzz v0.0.0-20200318091601-be3528f3a813/go.mod h1:11Gm+ccJnvAhCNLlf5+cS9KjtbaD5I5zaZpFMsTHWTw= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= -github.com/eapache/go-resiliency v1.2.0/go.mod 
h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= github.com/eclipse/paho.mqtt.golang v1.2.0/go.mod h1:H9keYFcgq3Qr5OUJm/JZI/i6U7joQ8SYLhZwfeOo6Ts= -github.com/edsrzf/mmap-go v0.0.0-20160512033002-935e0e8a636c/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw= github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= -github.com/elastic/gosigar v0.14.1/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/ethereum/go-ethereum v1.9.25/go.mod h1:vMkFiYLHI4tgPw4k2j4MHKoovchFE8plZ0M9VMk4/oM= -github.com/ethereum/go-ethereum v1.10.1/go.mod h1:E5e/zvdfUVr91JZ0AwjyuJM3x+no51zZJRz61orLLSk= -github.com/ethereum/go-ethereum v1.10.3 h1:SEYOYARvbWnoDl1hOSks3ZJQpRiiRJe8ubaQGJQwq0s= -github.com/ethereum/go-ethereum v1.10.3/go.mod h1:99onQmSd1GRGOziyGldI41YQb7EESX3Q4H41IfJgIQQ= -github.com/fatih/color v1.3.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/ethereum/go-ethereum v1.10.8 h1:0UP5WUR8hh46ffbjJV7PK499+uGEyasRIfffS0vy06o= +github.com/ethereum/go-ethereum v1.10.8/go.mod h1:pJNuIUYfX5+JKzSD/BTdNsvJSZ1TJqmz0dVyXMAbf6M= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fjl/memsize v0.0.0-20180418122429-ca190fb6ffbc/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0= +github.com/felixge/httpsnoop v1.0.1 h1:lvB5Jl89CsZtGIWuTcDM1E/vkVs49/Ml7JJe07l8SPQ= +github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 h1:FtmdgXiUlNeRsoNMFlKLDt+S+6hbjVMEW6RGQ7aUf7c= github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0= github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= -github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= -github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= 
-github.com/garyburd/redigo v1.6.0/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff h1:tY80oXqGNY4FhTFhk+o9oFHGINQ/+vhlm8HFzi6znCI= github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww= +github.com/getkin/kin-openapi v0.53.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4= +github.com/getkin/kin-openapi v0.61.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4= github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= @@ -247,6 +235,7 @@ github.com/gin-gonic/gin v1.6.3 h1:ahKqKTFpO5KTPHxWZjEdPScmYaGtLo8Y4DMHoEsnp14= github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M= github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE= github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24= +github.com/go-chi/chi/v5 v5.0.0/go.mod h1:BBug9lr0cqtdAhsu6R4AAdvufI0/XBzAQSsUqJpoZOs= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= @@ -254,15 +243,14 @@ github.com/go-ini/ini v1.55.0 h1:0wVcG9udk2C3TGgmdIGKK9ScOZHZB5nbG+gwji9fhhc= github.com/go-ini/ini v1.55.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.10.0 h1:dXFJfIHVvUcpSgDOV+Ne6t7jXri8Tfv2uOLHUZ2XNuo= github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logfmt/logfmt v0.5.0 h1:TrB8swr/68K7m9CcGut2g3UOihhbcbiMAYiuTXdEih4= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-ole/go-ole v1.2.1 h1:2lOsA72HgjxAuMlKpFiCbHTvu44PIVkZ5hqm3RSdI/E= github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8= -github.com/go-playground/assert/v2 v2.0.1 h1:MsBgLAaY856+nPRTKrp3/OZK38U/wa0CcBYNjji3q3A= +github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= github.com/go-playground/locales v0.12.1/go.mod h1:IUMDtCfWo/w/mtMfIE/IG2K+Ey3ygWanZIBtBW0W2TM= github.com/go-playground/locales v0.13.0 h1:HyWk6mgj5qFqCT5fjGBuRArbVDfE4hi8+e8ceBS/t7Q= @@ -333,13 +321,13 @@ github.com/golang/protobuf v1.5.0 h1:LUVKkCeviFUMKqHa4tXIIij/lbhnMbP7Fn5wKdKkRh4 github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= 
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.2/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.3-0.20201103224600-674baa8c7fc3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golangci/lint-1 v0.0.0-20181222135242-d2cdd8c08219/go.mod h1:/X8TswGSh1pIozq4ZwCfxS0WA5JGXguxk94ar/4c87Y= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/flatbuffers v1.11.0/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= +github.com/google/flatbuffers v1.12.0/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -362,7 +350,6 @@ github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/protobuf v3.14.0+incompatible/go.mod h1:lUQ9D1ePzbH2PrIS7ob/bjm9HXyH5WHB0Akwh7URreM= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -375,19 +362,16 @@ github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGa github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gordonklaus/ineffassign v0.0.0-20200309095847-7953dde2c7bf/go.mod h1:cuNKsD1zp2v6XfE/orVX2QE1LC+i254ceGcVeDT3pTU= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= -github.com/gorilla/handlers v1.4.2 h1:0QniY0USkHQ1RGCLfKxeNHK9bkDHGRYGNDFBCS+YARg= -github.com/gorilla/handlers v1.4.2/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= +github.com/gorilla/handlers v1.5.1 h1:9lRY6j8DEeeBT10CvO9hGW0gmky0BprnvDI5vfhUHH4= +github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.7.3 h1:gnP5JzjVOuiZD07fKKToCAOjS0yOpj/qPETTXCCS6hw= github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= -github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= +github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= +github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= 
-github.com/gorilla/websocket v1.4.1-0.20190629185528-ae1634f6a989/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/graph-gophers/graphql-go v0.0.0-20191115155744-f33e81362277/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc= github.com/graph-gophers/graphql-go v0.0.0-20201113091052-beb923fada29/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= @@ -407,20 +391,20 @@ github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyN github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-retryablehttp v0.7.0/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d h1:dg1dEPuWpEqDnvIw251EVy4zlP8gWbsGj4BsUKCRpYs= github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= @@ -429,30 +413,29 @@ github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= -github.com/herumi/bls-go-binary v0.0.0-20191119080710-898950e1a520 
h1:3ek8BJos3JW72rvPzGAWZwJ/iXjOyPSCUI4nAFnTPvg= -github.com/herumi/bls-go-binary v0.0.0-20191119080710-898950e1a520/go.mod h1:uTBfU/n3h1aOYIl5nNTbLn5dUfNkF1P97JTaz3bdvro= +github.com/herumi/bls-go-binary v1.0.1-0.20210830012634-a8e769d3b872 h1:TV+x87PxW2BcEzpX6yWK0mBEGc9NFppsihFszD6QmyA= +github.com/herumi/bls-go-binary v1.0.1-0.20210830012634-a8e769d3b872/go.mod h1:O4Vp1AfR4raRGwFeQpr9X/PQtncEicMoOe6BQt1oX0Y= github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao= github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA= -github.com/holiman/uint256 v1.1.1 h1:4JywC80b+/hSfljFlEBLHrrh+CIONLDz9NuFl0af4Mw= -github.com/holiman/uint256 v1.1.1/go.mod h1:y4ga/t+u+Xwd7CpDgZESaRcWy0I7XMlTMA25ApIH5Jw= +github.com/holiman/uint256 v1.2.0 h1:gpSYcPLWGv4sG43I2mVLiDZCNDh/EpGjSk8tmtxitHM= +github.com/holiman/uint256 v1.2.0/go.mod h1:y4ga/t+u+Xwd7CpDgZESaRcWy0I7XMlTMA25ApIH5Jw= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= -github.com/huin/goupnp v1.0.0/go.mod h1:n9v9KO1tAxYH82qOn+UTIFQDmx5n1Zxd/ClZDMX7Bnc= -github.com/huin/goupnp v1.0.1-0.20200620063722-49508fba0031/go.mod h1:nNs7wvRfN1eKaMknBydLNQU6146XQim8t4h+q90biWo= -github.com/huin/goupnp v1.0.1-0.20210310174557-0ca763054c88 h1:bcAj8KroPf552TScjFPIakjH2/tdIrIH8F+cc4v4SRo= -github.com/huin/goupnp v1.0.1-0.20210310174557-0ca763054c88/go.mod h1:nNs7wvRfN1eKaMknBydLNQU6146XQim8t4h+q90biWo= +github.com/huin/goupnp v1.0.2 h1:RfGLP+h3mvisuWEyybxNq5Eft3NWhHLPeUN72kpKZoI= +github.com/huin/goupnp v1.0.2/go.mod h1:0dxJBVBHqTMjIUMkESDTNgOOx/Mw5wYIfyFmdzSamkM= github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/improbable-eng/grpc-web v0.14.0 h1:GdoK+cXABdB+1keuqsV1drSFO2XLYIxqt/4Rj8SWGBk= github.com/improbable-eng/grpc-web v0.14.0/go.mod h1:6hRR09jOEG81ADP5wCQju1z71g6OL4eEvELdran/3cs= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/influxdata/flux v0.65.1/go.mod h1:J754/zds0vvpfwuq7Gc2wRdVwEodfpCFM7mYlOw2LqY= -github.com/influxdata/influxdb v1.2.3-0.20180221223340-01288bdb0883/go.mod h1:qZna6X/4elxqT3yI9iZYdZrWWdeFOOprn86kgg4+IzY= github.com/influxdata/influxdb v1.8.3/go.mod h1:JugdFhsvvI8gadxOI6noqNeeBHvWNTbfYGtiAn+2jhI= +github.com/influxdata/influxdb-client-go/v2 v2.4.0/go.mod h1:vLNHdxTJkIf2mSLvGrpj8TCcISApPoXkaxP8g9uRlW8= github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= -github.com/influxdata/influxdb1-client v0.0.0-20200827194710-b269163b24ab/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= github.com/influxdata/influxql v1.1.1-0.20200828144457-65d3ef77d385/go.mod h1:gHp9y86a/pxhjJ+zMjNXiQAA197Xk9wLxaz+fGG+kWk= github.com/influxdata/line-protocol v0.0.0-20180522152040-32c6aa80de5e/go.mod h1:4kt73NQhadE3daL3WhR5EJ/J2ocX0PZzwxQ0gXJ7oFE= +github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839/go.mod h1:xaLFMmpvUxqXtVkUJfg9QmT88cDaCJ3ZKgdZ78oO8Qo= +github.com/influxdata/line-protocol v0.0.0-20210311194329-9aa0e372d097/go.mod h1:xaLFMmpvUxqXtVkUJfg9QmT88cDaCJ3ZKgdZ78oO8Qo= github.com/influxdata/promql/v2 v2.12.0/go.mod h1:fxOPu+DY0bqCTCECchSRtWfc+0X19ybifQhZoQNF5D8= github.com/influxdata/roaring 
v0.4.13-0.20180809181101-fc520f41fab6/go.mod h1:bSgUQ7q5ZLSO+bKBGqJiCBGAl+9DxyW63zLTujjUlOE= github.com/influxdata/tdigest v0.0.0-20181121200506-bf2b5ad3c0a9/go.mod h1:Js0mqiSBE6Ffsg94weZZ2c+v/ciT8QRHFOap7EKDrR0= @@ -514,12 +497,6 @@ github.com/jackc/puddle v1.1.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dv github.com/jackc/puddle v1.1.1/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458 h1:6OvNmYgJyexcZ3pYbTI9jWx5tHo1Dee/tWbLMfPe2TA= github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= -github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs= -github.com/jcmturner/dnsutils/v2 v2.0.0/go.mod h1:b0TnjGOvI/n42bZa+hmXL+kFJZsFT7G4t3HTlQ184QM= -github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= -github.com/jcmturner/goidentity/v6 v6.0.1/go.mod h1:X1YW3bgtvwAXju7V3LCIMpY0Gbxyjn/mY9zx4tFonSg= -github.com/jcmturner/gokrb5/v8 v8.4.2/go.mod h1:sb+Xq/fTY5yktf/VxLsE3wlfPqQjp0aWNYyvBVK62bc= -github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc= github.com/jedisct1/go-minisign v0.0.0-20190909160543-45766022959e/go.mod h1:G1CVv03EnqU1wYL2dFwXxW2An0az9JTl/ZsqXQeBlkU= github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= @@ -532,27 +509,22 @@ github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= -github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.9 h1:9yzud/Ht36ygwatGx56VwCZtlI/2AD15T1X2sjSuGns= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68= -github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jsternberg/zap-logfmt v1.0.0/go.mod h1:uvPs/4X51zdkcm5jXl5SYoN+4RK21K8mysFmDaM/h+o= github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/julienschmidt/httprouter v1.1.1-0.20170430222011-975b5c4c7c21/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/julienschmidt/httprouter v1.3.0/go.mod 
h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= github.com/jwilder/encoding v0.0.0-20170811194829-b4e1701a28ef/go.mod h1:Ct9fl0F6iIOGgxJ5npU/IUOhOhqlVrGjyIZc8/MagT0= github.com/karalabe/usb v0.0.0-20190919080040-51dc0efba356 h1:I/yrLt2WilKxlQKCM52clh5rGzTKpVctGT1lH4Dc8Jw= github.com/karalabe/usb v0.0.0-20190919080040-51dc0efba356/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU= -github.com/kilic/bls12-381 v0.0.0-20201226121925-69dacb279461/go.mod h1:vDTTHJONJ6G+P2R74EhnyotQDTliQDnFEwhdmfzw1ig= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= @@ -561,11 +533,10 @@ github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6 github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.10.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.11.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= -github.com/klauspost/compress v1.11.9 h1:5OCMOdde1TCT2sookEuVeEZzA8bmRSFV3AwPDZAG8AA= -github.com/klauspost/compress v1.11.9/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.12.3 h1:G5AfA94pHPysR56qqrkO2pxEexdDzrpFJ6yt/VqWxVU= +github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= +github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5 h1:2U0HzY8BJ8hVwDKIzp7y4voR9CX/nvcfymLmg2UiOio= github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= -github.com/klauspost/cpuid v1.2.1 h1:vJi+O/nMdFt0vqm8NZBI6wzALWdA2X+egi0ogNyrC/w= -github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/cpuid/v2 v2.0.2/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.0.4 h1:g0I61F2K2DjRHz1cnxlkNSBIaePVoJIjjnHui8QHbiw= github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= @@ -578,7 +549,6 @@ github.com/koding/cache v0.0.0-20161222233015-e8a81b0b3f20 h1:R7RAW1p8wjhlHKFhS4 github.com/koding/cache v0.0.0-20161222233015-e8a81b0b3f20/go.mod h1:sh5SGGmQVGUkWDnxevz0I2FJ4TeC18hRPRjKVBMb2kA= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= @@ -589,7 +559,8 @@ github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/leanovate/gopter v0.2.8/go.mod 
h1:gNcbPWNEWRe4lm+bycKqxUYoH5uoVje5SkOJ3uoLer8= +github.com/labstack/echo/v4 v4.2.1/go.mod h1:AA49e0DZ8kk5jTOOCKNuPR6oTnBS0dYiM4FW1e6jwpg= +github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k= github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= github.com/leodido/go-urn v1.1.0/go.mod h1:+cyI34gQWZcE1eQU7NVgKkkzdXDQHr1dBMtdAPozLkw= github.com/leodido/go-urn v1.2.0 h1:hpXL4XnriNwQ/ABnpepYM/1vCLWNDfUNts8dX3xTG6Y= @@ -604,19 +575,24 @@ github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0U github.com/lithammer/shortuuid/v3 v3.0.7 h1:trX0KTHy4Pbwo/6ia8fscyHoGA+mf1jWbPJVuvyJQQ8= github.com/lithammer/shortuuid/v3 v3.0.7/go.mod h1:vMk8ke37EmiewwolSO1NLW8vP4ZaKlRuDIi8tWWmAts= github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/magma/augmented-networks/accounting/protos v0.1.1/go.mod h1:Hpfg8aAxldUN7qlVtR5xwlAf8pcetFm8DWwRKZsh2J4= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/matryer/moq v0.0.0-20190312154309-6cfb0558e1bd/go.mod h1:9ELz6aaclSIGnZBoaSLZ3NAl1VTufbOrXBPvtcy6WiQ= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-colorable v0.1.0/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE= github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.8 h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ0s8= +github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.5-0.20180830101745-3fb116b82035/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= @@ -625,7 +601,6 @@ github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHX github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth 
v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0= github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= @@ -634,8 +609,6 @@ github.com/mattn/go-sqlite3 v1.14.0/go.mod h1:JIl7NbARA7phWnGvh0LKTyg7S9BA+6gx71 github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/miguelmota/go-ethereum-hdwallet v0.0.1 h1:DWqgZtKWTGcHR5QsprMJItZiJ2xVEQTv640r597ul8M= -github.com/miguelmota/go-ethereum-hdwallet v0.0.1/go.mod h1:iowKavXnc0NVNiv/UKYYBo3SjADph5PUvYQTjOIV9as= github.com/minio/minio-go v6.0.14+incompatible h1:fnV+GD28LeqdN6vT2XdGKW8Qe/IfjJDswNVuni6km9o= github.com/minio/minio-go v6.0.14+incompatible/go.mod h1:7guKYtitv8dktvNUGrhzmNlA5wrAABTQXCoesZdFQO8= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= @@ -656,9 +629,8 @@ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lN github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223 h1:F9x/1yl3T2AeKLr2AMdilSD8+f9bvMnNN8VS5iDtovc= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= -github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0= github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416/go.mod h1:NBIhNtsFMo3G2szEBne+bO4gS192HuIYRqfvOWb4i1E= github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= @@ -669,33 +641,22 @@ github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxzi github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= github.com/nishanths/predeclared v0.0.0-20200524104333-86fad755b4d3/go.mod h1:nt3d53pc1VYcphSCIaYAJtnPYnr3Zyn8fMq2wvPGPso= -github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= -github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= -github.com/olekukonko/tablewriter 
v0.0.2-0.20190409134802-7e037d187b0c/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA= github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= -github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= -github.com/openconfig/gnmi v0.0.0-20210226144353-8eae1937bf84/go.mod h1:H/20NXlnWbCPFC593nxpiKJ+OU//7mW7s7Qk7uVdg3Q= -github.com/openconfig/goyang v0.0.0-20200115183954-d0a48929f0ea/go.mod h1:dhXaV0JgHJzdrHi2l+w0fZrwArtXL7jEFoiqLEdmkvU= -github.com/openconfig/reference v0.0.0-20201210185750-72ca4cfd4abd/go.mod h1:ym2A+zigScwkSEb/cVQB0/ZMpU3rqiH6X7WRRsxgOGw= -github.com/openconfig/ygot v0.6.0/go.mod h1:o30svNf7O0xK+R35tlx95odkDmZWS9JyWWQSmIhqwAs= github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= @@ -710,18 +671,15 @@ github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FI github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc= github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= github.com/paulbellamy/ratecounter v0.2.0/go.mod h1:Hfx1hDpSGoqxkVVpBi/IlYD7kChlfo5C6hzIHwPqfFE= -github.com/pborman/uuid v0.0.0-20170112150404-1b00554d8222/go.mod h1:VyrYX9gd7irzKovcSS6BIIEwPRkP2Wm2m9ufcdFSJ34= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= github.com/peterh/liner v1.0.1-0.20180619022028-8c1271fcf47f/go.mod h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc= -github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7 h1:oYW+YCJ1pachXTQmzR3rNLYGGz4g/UgFcjb28p/viDM= github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7/go.mod h1:CRroGNssyjTd/qIG2FyxByd2S8JEAZXBl4qUrZf8GS0= github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= github.com/pierrec/lz4 
v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= -github.com/pierrec/lz4 v2.6.0+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -737,36 +695,25 @@ github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= -github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= -github.com/prometheus/client_golang v1.9.0/go.mod h1:FqZLKOZnGdFAhOK4nqGHa7D66IdsO+O441Eve7ptJDU= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= -github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= -github.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= -github.com/prometheus/common v0.18.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= -github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/tsdb 
v0.6.2-0.20190402121629-4f204dcbc150/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= -github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/remeh/sizedwaitgroup v0.0.0-20180822144253-5e7302b12cce h1:aP+C+YbHZfOQlutA4p4soHi7rVUqHQdWEVMSkHfDTqY= github.com/remeh/sizedwaitgroup v0.0.0-20180822144253-5e7302b12cce/go.mod h1:3j2R4OIe/SeS6YDhICBy22RWjJC5eNCJ1V+9+NVNYlo= github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52/go.mod h1:RDpi1RftBQPUCDRw6SmxeaREsAaRKnOclghuzp/WRzc= @@ -774,15 +721,15 @@ github.com/rjeczalik/notify v0.9.1 h1:CLCKso/QK1snAlnhNR/CNvNiFU2saUtjV0bx3EwNeC github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.3.0 h1:RR9dF3JtopPvtkroDZuVD7qquD0bnHlKSqaQhgwt8yk= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rs/cors v0.0.0-20160617231935-a62a804a8a00/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/rs/cors v1.8.0 h1:P2KMzcFwrPoSjkF1WLRPsp3UMLyql8L4v9hQpVeK5so= github.com/rs/cors v1.8.0/go.mod h1:EBwu+T5AvHOcXwvZIkQFjUN6s8Czyqw12GL/Y0tUyRM= -github.com/rs/xhandler v0.0.0-20160618193221-ed27b6fd6521/go.mod h1:RvLn4FgxWubrpZHtQLnOf6EwhN2hEMusxZOhcW9H3UQ= github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= @@ -793,7 +740,6 @@ github.com/segmentio/kafka-go v0.2.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfP github.com/selvatico/go-mocket v1.0.7 h1:jbVa7RkoOCzBanQYiYF+VWgySHZogg25fOIKkM38q5k= github.com/selvatico/go-mocket v1.0.7/go.mod h1:7bSWzuNieCdUlanCVu3w0ppS0LvDtPAZmKBIlhoTcp8= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= -github.com/shirou/gopsutil v2.20.5+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible h1:Bn1aCHHRnjv4Bl16T8rcaFjYSrGrIZvpiGO6P3Q4GpU= github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= @@ -803,7 +749,6 @@ github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeV 
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= @@ -811,25 +756,25 @@ github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9 github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= github.com/spf13/cobra v1.0.1-0.20201006035406-b97b5ead31f7/go.mod h1:yk5b0mALVusDL5fMM6Rd1wgnoO5jUPhwsQ6LQAJTidQ= -github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI= github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/spf13/viper v1.7.0 h1:xVKxvI7ouOI5I+U9s2eeiUfMaWBVoXA3AWskkrqK0VM= github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4 h1:Gb2Tyox57NRNuZ2d3rmvB3pcmbu7O1RS3m8WRx7ilrg= github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4/go.mod h1:RZLeN1LMWmRsyYjvAu+I6Dm9QmlDaIIt+Y+4Kd7Tp+Q= -github.com/steakknife/bloomfilter v0.0.0-20180922174646-6819c0d2a570/go.mod h1:8OR4w3TdeIHIh1g6EMY5p0gVNOovcWC+1vpc7naMuAw= -github.com/steakknife/hamming v0.0.0-20180906055917-c99c65617cd3/go.mod h1:hpGUWaI9xL8pRQCTXQgocU38Qw1g0Us7n5PxxTwTCYU= github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= @@ -847,13 +792,9 @@ github.com/stretchr/testify v1.7.0 
h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5Cc github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= -github.com/syndtr/goleveldb v1.0.1-0.20200815110645-5c35d600f0ca/go.mod h1:u2MKkTVTVJWe5D1rCvame8WqhBd88EuIwODJZ1VHCPM= github.com/syndtr/goleveldb v1.0.1-0.20210305035536-64b5b1c73954 h1:xQdMZ1WLrgkkvOZ/LDQxjVxMLdby7osSh4ZEVa5sIjs= github.com/syndtr/goleveldb v1.0.1-0.20210305035536-64b5b1c73954/go.mod h1:u2MKkTVTVJWe5D1rCvame8WqhBd88EuIwODJZ1VHCPM= -github.com/templexxx/cpufeat v0.0.0-20180724012125-cef66df7f161/go.mod h1:wM7WEvslTq+iOEAMDLSzhVuOt5BRZ05WirO+b09GHQU= -github.com/templexxx/xor v0.0.0-20191217153810-f85b25db303b/go.mod h1:5XA7W9S6mni3h5uvOC75dA3m9CCCaS83lltmc0ukdi4= github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= -github.com/tjfoc/gmsm v1.4.0/go.mod h1:j4INPkHWMrhJb38G+J6W4Tw0AbuN8Thu3PbdVYhVcTE= github.com/tklauser/go-sysconf v0.3.5 h1:uu3Xl4nkLzQfXNsWn15rPc/HQCJKObbt1dKJeWp3vU4= github.com/tklauser/go-sysconf v0.3.5/go.mod h1:MkWzOF4RMCshBAMXuhXJs64Rte09mITnppBXY/rYEFI= github.com/tklauser/numcpus v0.2.2 h1:oyhllyrScuYI6g+h/zUvNXNp1wy7x8qQy3t/piefldA= @@ -866,19 +807,19 @@ github.com/tyler-smith/go-bip39 v1.1.0 h1:5eUemwrMargf3BSLRRCalXT93Ns6pQJIjYQN2n github.com/tyler-smith/go-bip39 v1.1.0/go.mod h1:gUYDtqQw1JS3ZJ8UWVcGTGqqr6YIN3CWg+kkNaLt55U= github.com/ugorji/go v1.1.7 h1:/68gy2h+1mWMrwZFeD1kQialdSzAb432dtpeJ42ovdo= github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= +github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= github.com/ugorji/go/codec v1.1.7 h1:2SvQaVZ1ouYrrKKwoSk2pzd4A9evlKJb9oTL+OaLUSs= github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI= +github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= +github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= +github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= github.com/willf/bitset v1.1.3/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= -github.com/wsddn/go-ecdh v0.0.0-20161211032359-48726bab9208/go.mod h1:IotVbo4F+mw0EzQ08zFqg7pK3FebNXpaMsRy2RT+Ees= -github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= -github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg= -github.com/xtaci/kcp-go v5.4.20+incompatible/go.mod h1:bN6vIwHQbfHaHtFpEssmWsN45a+AZwO7eyRCmEIbtvE= -github.com/xtaci/lossyconn v0.0.0-20190602105132-8df528c0c9ae/go.mod h1:gXtu8J62kEgmN++bm9BVICuT/e8yiLI2KFobd/TRFsE= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/yuin/goldmark v1.1.25/go.mod 
h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -901,6 +842,7 @@ go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.22.6/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= @@ -908,6 +850,8 @@ go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/goleak v1.1.10 h1:z+mqJhf6ss6BSfSM671tgKyZBFPTTJM+HLxnhPC3wu0= +go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= @@ -920,11 +864,13 @@ go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= -go.uber.org/zap v1.16.0 h1:uFRZXykJGK9lLY4HtgSw44DnIcAM+kRBP7x5m+NpAOM= go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ= +go.uber.org/zap v1.19.0 h1:mZQZefskPPCMIBCSEH0v2/iUqqLrYtaeqwD6FUGUnFE= +go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190123085648-057139ce5d2b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -937,14 +883,11 @@ golang.org/x/crypto v0.0.0-20190909091759-094676da4a83/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod 
h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201012173705-84dcc777aaee/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201112155050-0c6587e931a9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= -golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2 h1:It14KIkyBFYkHkwZ7k45minvA9aorojkyjGk9KJ5B/w= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -953,7 +896,6 @@ golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190731235908-ec7cb31e5a56/go.mod h1:JhuoJpWY28nO4Vef9tZUw9qufEGTyX1+7lmHxV5q5G4= golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= @@ -977,22 +919,18 @@ golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7 golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= -golang.org/x/mobile v0.0.0-20200801112145-973feb4309de/go.mod h1:skQtrUTUwhdJvXM/2KKJzY8pDgNr9I/FOMqDVRPBUS4= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191209134235-331c550502dd/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1026,14 +964,13 @@ golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20201010224723-4f7140c49acb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210220033124-5f55cee0dc0d/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110 h1:qWPm9rbaAMKs8Bq/9LRpbMqxWRVUAQwMI9fVrssnTfw= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d h1:20cMwl2fHAzkJMEA+8J4JgqBQcQGzbisXo31MIeenXI= +golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1049,10 +986,8 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1061,6 +996,7 @@ golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190124100055-b90733256f2e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1083,7 +1019,6 @@ golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200107162124-548cf772de50/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1099,23 +1034,19 @@ golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200824131525-c12d262b63d8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201101102859-da207088b7d1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201214210602-f9fddec55a1e/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210105210732-16f7687f5001/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210313202042-bd2e13477e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210316164454-77fc1eacc6aa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210420205809-ac73e9fd8988 h1:EjgCl+fVlIaPJSori0ikSz3uV0DOHKWOJFpv1sAAhBM= +golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210420205809-ac73e9fd8988/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912 h1:uCLL3g5wH2xjxVREVuAbP9JM5PPKjRbXKRa6IBjkzmU= +golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1124,8 +1055,9 @@ golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3 golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5 h1:i6eZZ+zk0SOf0xgBpEpPD18qWcJda6q1sxt3S0kzyUQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1158,6 +1090,7 @@ golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -1168,7 +1101,6 @@ golang.org/x/tools 
v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200108203644-89082a384178/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117012304-6edc0a871e69/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= @@ -1263,7 +1195,6 @@ google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7Fc google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210207032614-bba0dbe2a9ea/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210224155714-063164c882e6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb h1:hcskBH5qZCOa7WpTUFUFvoebnSFZBYpjykLtjIp9DVk= @@ -1286,7 +1217,6 @@ google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3Iji google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= google.golang.org/grpc v1.35.0-dev.0.20201218190559-666aea1fb34c/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.0 h1:o1bcQ6imQMIOpdrO3SWf2z5RV72WbDwdXuK0MDlc8As= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= @@ -1309,7 +1239,6 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0 google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= -gopkg.in/bsm/ratelimit.v1 v1.0.0-20160220154919-db14e161995a/go.mod h1:KF9sEfUPAXdG8Oev9e99iLGnl2uJMjc5B+4y3O7x610= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -1325,11 +1254,6 @@ gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:a gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.61.0 h1:LBCdW4FmFYL4s/vDZD1RQYX7oAR6IjujCYgMdbHBR10= 
gopkg.in/ini.v1 v1.61.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/jcmturner/aescts.v1 v1.0.1/go.mod h1:nsR8qBOg+OucoIW+WMhB3GspUQXq9XorLnQb9XtvcOo= -gopkg.in/jcmturner/dnsutils.v1 v1.0.1/go.mod h1:m3v+5svpVOhtFAP/wSz+yzh4Mc0Fg7eRhxkJMWSIz9Q= -gopkg.in/jcmturner/goidentity.v3 v3.0.0/go.mod h1:oG2kH0IvSYNIu80dVAyu/yoefjq1mNfM5bm88whjWx4= -gopkg.in/jcmturner/gokrb5.v7 v7.2.3/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM= -gopkg.in/jcmturner/rpc.v1 v1.1.0/go.mod h1:YIdkC4XfD6GXbzje11McwsDuOlZQSb9W4vfLvuNnlv8= gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce h1:xcEWjVhvbDy+nHP67nPDDpbYrY+ILlfndk4bRioVHaU= gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8= @@ -1337,9 +1261,7 @@ gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24 gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce h1:+JknDZhAj8YMt7GC73Ei8pv4MzjDUNPHgQWJdtMAaDU= gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c= gopkg.in/olebedev/go-duktape.v3 v3.0.0-20200619000410-60c24ae608a6/go.mod h1:uAJfkITjFhyEEuUfm7bsmCZRbW5WRq8s9EY8HZ6hCns= -gopkg.in/redis.v4 v4.2.4/go.mod h1:8KREHdypkCEojGKQcjMqAODMICIVwZAONWq8RowTITA= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/urfave/cli.v1 v1.20.0 h1:NdAVW6RYxDif9DhDHaAortIu956m2c0v+09AZBPTbE0= gopkg.in/urfave/cli.v1 v1.20.0/go.mod h1:vuBzUtMdQeixQj8LVd+/98pzhxNGQoyuPBlsXHOQNO0= @@ -1349,7 +1271,6 @@ gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= @@ -1383,7 +1304,6 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.1.3 h1:qTakTkI6ni6LFD5sBwwsdSO+AQqbSIxOauHTTQKZ/7o= honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= nhooyr.io/websocket v1.8.7 h1:usjR2uOr/zjjkVMy0lW+PPohFok7PCow5sDjLgX4P4g= nhooyr.io/websocket v1.8.7/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0= diff --git a/sql/15-add-chunk_size-to-reference_objects.sql b/sql/15-add-chunk_size-to-reference_objects.sql new file mode 100644 index 000000000..a2530e3f0 --- /dev/null +++ b/sql/15-add-chunk_size-to-reference_objects.sql @@ -0,0 +1,9 @@ +-- +-- Add chunk_size column to reference_objects table. 
+--
+
+-- Connect to the blobber metadata database before altering the table.
+\connect blobber_meta;
+
+
+ALTER TABLE reference_objects ADD COLUMN chunk_size INT NOT NULL DEFAULT 65536;
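The migration above gives every existing row a fixed 64 KiB default (65536). As a hypothetical follow-up, not part of this patch, the new column and its default can be checked, or rolled back, with standard PostgreSQL statements against the same blobber_meta database:

-- Hypothetical verification / rollback sketch; assumes psql and the blobber_meta database shown above.
\connect blobber_meta;

-- Confirm the column exists with the expected type and default (65536):
SELECT column_name, data_type, column_default
FROM information_schema.columns
WHERE table_name = 'reference_objects'
  AND column_name = 'chunk_size';

-- Roll back the change if required:
-- ALTER TABLE reference_objects DROP COLUMN IF EXISTS chunk_size;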