diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml new file mode 100644 index 000000000..c9ceeb49e --- /dev/null +++ b/.github/workflows/build.yml @@ -0,0 +1,86 @@ +name: Dockerize + +on: + workflow_dispatch: + inputs: + latest_tag: + description: 'type yes for building latest tag' + default: 'no' + required: true + +env: + ZCHAIN_BUILDBASE: zchain_build_base + ZCHAIN_BUILDRUN: zchain_run_base + BLOBBER_REGISTRY: ${{ secrets.BLOBBER_REGISTRY }} + VALIDATOR_REGISTRY: ${{ secrets.VALIDATOR_REGISTRY }} + +jobs: + dockerize_blobber: + runs-on: ubuntu-20.04 + steps: + - uses: actions/checkout@v2 + + - name: Get the version + id: get_version + run: | + BRANCH=$(echo ${GITHUB_REF#refs/heads/} | sed 's/\//-/g') + SHORT_SHA=$(echo $GITHUB_SHA | head -c 8) + echo ::set-output name=BRANCH::${BRANCH} + echo ::set-output name=VERSION::${BRANCH}-${SHORT_SHA} + - name: Login to Docker Hub + uses: docker/login-action@v1 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_PASSWORD }} + + - name: Build blobber + run: | + docker build -t $BLOBBER_REGISTRY:$TAG -f "$DOCKERFILE_BLOB" . 
+ docker tag $BLOBBER_REGISTRY:$TAG $BLOBBER_REGISTRY:latest + docker push $BLOBBER_REGISTRY:$TAG + env: + TAG: ${{ steps.get_version.outputs.VERSION }} + DOCKERFILE_BLOB: "docker.local/Dockerfile" + + - name: Push blobber + run: | + if [[ "$PUSH_LATEST" == "yes" ]]; then + docker push $BLOBBER_REGISTRY:latest + fi + env: + PUSH_LATEST: ${{ github.event.inputs.latest_tag }} + + dockerize_validator: + runs-on: ubuntu-20.04 + steps: + - uses: actions/checkout@v1 + + - name: Get the version + id: get_version + run: | + BRANCH=$(echo ${GITHUB_REF#refs/heads/} | sed 's/\//-/g') + SHORT_SHA=$(echo $GITHUB_SHA | head -c 8) + echo ::set-output name=BRANCH::${BRANCH} + echo ::set-output name=VERSION::${BRANCH}-${SHORT_SHA} + - name: Login to Docker Hub + uses: docker/login-action@v1 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_PASSWORD }} + + - name: Build validator + run: | + docker build -t $VALIDATOR_REGISTRY:$TAG -f "$DOCKERFILE_PROXY" . + docker tag $VALIDATOR_REGISTRY:$TAG $VALIDATOR_REGISTRY:latest + docker push $VALIDATOR_REGISTRY:$TAG + env: + TAG: ${{ steps.get_version.outputs.VERSION }} + DOCKERFILE_PROXY: "docker.local/ValidatorDockerfile" + + - name: Push validator + run: | + if [[ "$PUSH_LATEST" == "yes" ]]; then + docker push $VALIDATOR_REGISTRY:latest + fi + env: + PUSH_LATEST: ${{ github.event.inputs.latest_tag }} \ No newline at end of file diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 2fc27a3d3..8b53fa32f 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -29,7 +29,6 @@ jobs: run: | docker network create --driver=bridge --subnet=198.18.0.0/15 --gateway=198.18.0.255 testnet0 ./docker.local/bin/build.blobber.sh - test: runs-on: ubuntu-20.04 steps: @@ -128,4 +127,4 @@ jobs: docker build -t $VALIDATOR_REGISTRY:$TAG -f docker.local/ValidatorDockerfile . 
docker push $VALIDATOR_REGISTRY:$TAG env: - TAG: ${{ steps.get_version.outputs.VERSION }} + TAG: ${{ steps.get_version.outputs.VERSION }} \ No newline at end of file diff --git a/.github/workflows/cicd.yml b/.github/workflows/cicd.yml new file mode 100644 index 000000000..abb166577 --- /dev/null +++ b/.github/workflows/cicd.yml @@ -0,0 +1,108 @@ +name: CICD_TEST_HERTZNER + +on: + push: + branches: + - gitactionsfix + +env: + GITHUB_TOKEN: ${{ secrets.CICD }} + +jobs: + build-push-image: + runs-on: ubuntu-20.04 + steps: + - uses: actions/checkout@v2 + + - name: Get Branch + id: get_branch + run: | + BRANCH=$(echo ${GITHUB_REF#refs/heads/} | sed 's/\//-/g') + echo ::set-output name=BRANCH::${BRANCH} + echo "This workflow run is triggered by ${{ github.event_name }} ." + + - name: Triggering build.yml for creating & pushing docker images. + uses: convictional/trigger-workflow-and-wait@v1.3.0 + with: + owner: 0chain + repo: blobber + github_token: ${{ secrets.CICD }} + workflow_file_name: test.yml + ref: ${{ steps.get_branch.outputs.BRANCH }} + # inputs: '{"DOCKERHUB_REPO":"TEST"}' + propagate_failure: true + trigger_workflow: true + wait_workflow: true + + # network-setup: + # runs-on: ubuntu-20.04 + # env: + # HOST: testnet.load.testnet-0chain.net + # steps: + # - uses: actions/checkout@v2 + + # - uses: azure/setup-helm@v1 + # with: + # version: 'v3.2.2' + # - name: Setup helm repo + # run: | + # helm repo add 0chain http://0chain-helm-chart.s3-website.us-east-2.amazonaws.com/ + # - name: Setup kubeconfig + # run: | + # mkdir -p ~/.kube + # echo "${{ secrets.KUBECONFIG64TEST }}" | base64 -d > ~/.kube/config + # - name: Uninstall old release + # run: | + # helm uninstall zerochain -n zerochain || true + # - name: Setup chain + # run: | + # helm install zerochain -n zerochain \ + # --set ingress.host=${HOST} \ + # --set sharder.image.tag=latest \ + # --set miner.image.tag=latest \ + # --set blobber.image.tag=latest \ + # --set validator.image.tag=latest \ + # 
0chain/0chain + # - name: Check if services are up + # run: | + # printf 'Waiting for 0dns' + # until [[ $(curl -I --silent -o /dev/null -w %{http_code} http://${HOST}/dns/) =~ 2[0-9][0-9] ]] ;do + # printf '.' + # sleep 2 + # done + # printf 'Waiting for 1st sharder' + # until [[ $(curl -I --silent -o /dev/null -w %{http_code} http://${HOST}/sharder0/) =~ 2[0-9][0-9] ]] ;do + # printf '.' + # sleep 2 + # done + # printf 'Waiting for 1st miner' + # until [[ $(curl -I --silent -o /dev/null -w %{http_code} http://${HOST}/miner0/) =~ 2[0-9][0-9] ]] ;do + # printf '.' + # sleep 2 + # done + + # # - name: Triggering ci.yml to run Postman API tests + # # uses: convictional/trigger-workflow-and-wait@v1.3.0 + # # with: + # # owner: 0chain + # # repo: 0proxy + # # github_token: ${{ secrets.GOSDK }} + # # workflow_file_name: build.yml + # # ref: master + # # propagate_failure: true + # # trigger_workflow: true + # # wait_workflow: true + + # # - name: Build + # # run: make build + # # - name: Running Load Tests + # # run: | + # # make run config=loadTest-load-testnet.yaml + + # - name: Uninstall the release + # if: ${{ always() }} + # run: helm uninstall zerochain -n zerochain + + # - name: Deleting kubeconfig + # run: | + # rm -rf ~/.kube diff --git a/.github/workflows/cicd_hetz.yml b/.github/workflows/cicd_hetz.yml new file mode 100644 index 000000000..47a16e435 --- /dev/null +++ b/.github/workflows/cicd_hetz.yml @@ -0,0 +1,166 @@ +name: CICD_Hetzner + +# on: +# push: +# branches: +# - gitactionsfix + +on: + workflow_dispatch: + inputs: + latest_tag: + description: 'type yes for building latest tag' + default: 'no' + required: true + +env: + ZCHAIN_BUILDBASE: zchain_build_base + ZCHAIN_BUILDRUN: zchain_run_base + BLOBBER_REGISTRY: ${{ secrets.BLOBBER_REGISTRY_TEST }} + VALIDATOR_REGISTRY: ${{ secrets.VALIDATOR_REGISTRY_TEST }} + KUBE_CONFIG_DATA: ${{ secrets.KUBE_CONFIG_DATA_TEST }} + KUBE_NAMESPACE: test + +jobs: + dockerize_blobber: + runs-on: ubuntu-20.04 + steps: + - 
uses: actions/checkout@v2 + + - name: Get the Version for Tagging + id: get_version + run: | + BRANCH=$(echo ${GITHUB_REF#refs/heads/} | sed 's/\//-/g') + SHORT_SHA=$(echo $GITHUB_SHA | head -c 8) + echo ::set-output name=BRANCH::${BRANCH} + echo ::set-output name=VERSION::${BRANCH}-${SHORT_SHA} + + - name: Login to Docker Hub + uses: docker/login-action@v1 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_PASSWORD }} + + - name: Build & Push Blobber Image + run: | + docker build -t $BLOBBER_REGISTRY:$TAG -f "$DOCKERFILE_BLOB" . + docker tag $BLOBBER_REGISTRY:$TAG $BLOBBER_REGISTRY:latest + # docker push $BLOBBER_REGISTRY:$TAG + env: + TAG: ${{ steps.get_version.outputs.VERSION }} + DOCKERFILE_BLOB: "docker.local/Dockerfile" + + - name: Push Blobber Image with Latest TAG + run: | + if [[ "$PUSH_LATEST" == "yes" ]]; then + docker push $BLOBBER_REGISTRY:latest + fi + env: + PUSH_LATEST: ${{ github.event.inputs.latest_tag }} + + - name: Update Blobber Image to Kubernetes Cluster + uses: kodermax/kubectl-aws-eks@master + with: + args: set image deployment/blobber-01 app=$BLOBBER_REGISTRY:$TAG --record -n $KUBE_NAMESPACE + args: set image deployment/blobber-02 app=$BLOBBER_REGISTRY:$TAG --record -n $KUBE_NAMESPACE + args: set image deployment/blobber-03 app=$BLOBBER_REGISTRY:$TAG --record -n $KUBE_NAMESPACE + args: set image deployment/blobber-04 app=$BLOBBER_REGISTRY:$TAG --record -n $KUBE_NAMESPACE + args: set image deployment/blobber-05 app=$BLOBBER_REGISTRY:$TAG --record -n $KUBE_NAMESPACE + args: set image deployment/blobber-06 app=$BLOBBER_REGISTRY:$TAG --record -n $KUBE_NAMESPACE + env: + TAG: ${{ steps.get_version.outputs.VERSION }} + + - name: Verify Kubernetes Deployment + uses: kodermax/kubectl-aws-eks@master + with: + args: rollout status deployment/blobber-01 -n $KUBE_NAMESPACE + args: rollout status deployment/blobber-02 -n $KUBE_NAMESPACE + args: rollout status deployment/blobber-03 -n $KUBE_NAMESPACE + args: rollout status 
deployment/blobber-04 -n $KUBE_NAMESPACE + args: rollout status deployment/blobber-05 -n $KUBE_NAMESPACE + args: rollout status deployment/blobber-06 -n $KUBE_NAMESPACE + + # - name: Triggering LoadTest Repo build + # uses: convictional/trigger-workflow-and-wait@v1.3.0 + # with: + # owner: 0chain + # repo: loadTest + # github_token: ${{ secrets.GOSDK }} + # workflow_file_name: load-test-v1.yml + # ref: master + # inputs: '{}' + # propagate_failure: true + # trigger_workflow: true + # wait_workflow: true + + + dockerize_validator: + runs-on: ubuntu-20.04 + steps: + - uses: actions/checkout@v1 + + - name: Get the Version for Tagging + id: get_version + run: | + BRANCH=$(echo ${GITHUB_REF#refs/heads/} | sed 's/\//-/g') + SHORT_SHA=$(echo $GITHUB_SHA | head -c 8) + echo ::set-output name=BRANCH::${BRANCH} + echo ::set-output name=VERSION::${BRANCH}-${SHORT_SHA} + + - name: Login to Docker Hub + uses: docker/login-action@v1 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_PASSWORD }} + + - name: Build & Push Validator Image + run: | + docker build -t $VALIDATOR_REGISTRY:$TAG -f "$DOCKERFILE_PROXY" . 
+ docker tag $VALIDATOR_REGISTRY:$TAG $VALIDATOR_REGISTRY:latest + # docker push $VALIDATOR_REGISTRY:$TAG + env: + TAG: ${{ steps.get_version.outputs.VERSION }} + DOCKERFILE_PROXY: "docker.local/ValidatorDockerfile" + + - name: Push Validator Image with Latest TAG + run: | + if [[ "$PUSH_LATEST" == "yes" ]]; then + docker push $VALIDATOR_REGISTRY:latest + fi + env: + PUSH_LATEST: ${{ github.event.inputs.latest_tag }} + + - name: Update Validator Image to Kubernetes Cluster + uses: kodermax/kubectl-aws-eks@master + with: + args: set image deployment/validator-01 app=$VALIDATOR_REGISTRY:$TAG --record -n $KUBE_NAMESPACE + args: set image deployment/validator-02 app=$VALIDATOR_REGISTRY:$TAG --record -n $KUBE_NAMESPACE + args: set image deployment/validator-03 app=$VALIDATOR_REGISTRY:$TAG --record -n $KUBE_NAMESPACE + args: set image deployment/validator-04 app=$VALIDATOR_REGISTRY:$TAG --record -n $KUBE_NAMESPACE + args: set image deployment/validator-05 app=$VALIDATOR_REGISTRY:$TAG --record -n $KUBE_NAMESPACE + args: set image deployment/validator-06 app=$VALIDATOR_REGISTRY:$TAG --record -n $KUBE_NAMESPACE + env: + TAG: ${{ steps.get_version.outputs.VERSION }} + + - name: Verify Kubernetes Deployment + uses: kodermax/kubectl-aws-eks@master + with: + args: rollout status deployment/validator-01 -n $KUBE_NAMESPACE + args: rollout status deployment/validator-02 -n $KUBE_NAMESPACE + args: rollout status deployment/validator-03 -n $KUBE_NAMESPACE + args: rollout status deployment/validator-04 -n $KUBE_NAMESPACE + args: rollout status deployment/validator-05 -n $KUBE_NAMESPACE + args: rollout status deployment/validator-06 -n $KUBE_NAMESPACE + + # - name: Triggering LoadTest Repo build + # uses: convictional/trigger-workflow-and-wait@v1.3.0 + # with: + # owner: 0chain + # repo: loadTest + # github_token: ${{ secrets.GOSDK }} + # workflow_file_name: load-test-v1.yml + # ref: master + # inputs: '{}' + # propagate_failure: true + # trigger_workflow: true + # wait_workflow: true \ No newline at end of file diff --git a/.github/workflows/test.yml 
b/.github/workflows/test.yml new file mode 100644 index 000000000..bb984e3a6 --- /dev/null +++ b/.github/workflows/test.yml @@ -0,0 +1,10 @@ +name: test + +on: + workflow_dispatch + +jobs: + dockerize_blobber: + runs-on: ubuntu-20.04 + steps: + - uses: actions/checkout@v2 diff --git a/README.md b/README.md index c1630c531..f55f8a205 100644 --- a/README.md +++ b/README.md @@ -68,6 +68,13 @@ To link to local gosdk so that the changes are reflected on the blobber build pl $ ./docker.local/bin/build.blobber.dev.sh ``` +For Apple M1 chip builds: + +``` + +$ ./docker.local/bin/build.blobber.sh -m1 + +``` 3. After building the container for blobber, go to Blobber1 directory (git/blobber/docker.local/blobber1) and run the container using diff --git a/code/go/0chain.net/blobber/main.go b/code/go/0chain.net/blobber/main.go index 322c27232..2a1944721 100644 --- a/code/go/0chain.net/blobber/main.go +++ b/code/go/0chain.net/blobber/main.go @@ -8,13 +8,16 @@ import ( "log" "net" "net/http" - "net/url" "os" "runtime" "strconv" - "strings" "time" + "go.uber.org/zap" + "github.com/gorilla/handlers" + "github.com/gorilla/mux" + "github.com/spf13/viper" + "github.com/0chain/gosdk/zcncore" "0chain.net/blobbercore/allocation" "0chain.net/blobbercore/challenge" "0chain.net/blobbercore/config" @@ -27,32 +30,50 @@ import ( "0chain.net/core/chain" "0chain.net/core/common" "0chain.net/core/encryption" + "0chain.net/core/node" "0chain.net/core/logging" . 
"0chain.net/core/logging" - "0chain.net/core/node" - "0chain.net/core/transaction" - "0chain.net/core/util" - - "github.com/0chain/gosdk/zcncore" - "github.com/gorilla/handlers" - "github.com/gorilla/mux" - "github.com/spf13/viper" - "go.uber.org/zap" ) -//var BLOBBER_REGISTERED_LOOKUP_KEY = datastore.ToKey("blobber_registration") - var startTime time.Time var serverChain *chain.Chain var filesDir *string var metadataDB *string func initHandlers(r *mux.Router) { - r.HandleFunc("/", HomePageHandler) + r.HandleFunc("/", func (w http.ResponseWriter, r *http.Request) { + mc := chain.GetServerChain() + + fmt.Fprintf(w, "
Running since %v ...\n", startTime) + fmt.Fprintf(w, "
Working on the chain: %v
\n", mc.ID) + fmt.Fprintf(w, + "
I am a blobber with
\n", + node.Self.ID, node.Self.PublicKey, build.BuildTag, + ) + + fmt.Fprintf(w, "
Miners ...\n") + network := zcncore.GetNetwork() + for _, miner := range network.Miners { + fmt.Fprintf(w, "%v\n", miner) + } + + fmt.Fprintf(w, "
Sharders ...\n") + for _, sharder := range network.Sharders { + fmt.Fprintf(w, "%v\n", sharder) + } + }) + handler.SetupHandlers(r) } -func SetupWorkerConfig() { +var fsStore filestore.FileStore //nolint:unused // global which might be needed somewhere + +func initEntities() (err error) { + fsStore, err = filestore.SetupFSStore(*filesDir + "/files") + return err +} + +func setupWorkerConfig() { config.Configuration.ContentRefWorkerFreq = viper.GetInt64("contentref_cleaner.frequency") config.Configuration.ContentRefWorkerTolerance = viper.GetInt64("contentref_cleaner.tolerance") @@ -117,48 +138,7 @@ func SetupWorkerConfig() { config.Configuration.ServiceCharge = viper.GetFloat64("service_charge") } -func SetupWorkers() { - var root = common.GetRootContext() - handler.SetupWorkers(root) - challenge.SetupWorkers(root) - readmarker.SetupWorkers(root) - writemarker.SetupWorkers(root) - allocation.StartUpdateWorker(root, - config.Configuration.UpdateAllocationsInterval) - // stats.StartEventDispatcher(2) -} - -var fsStore filestore.FileStore //nolint:unused // global which might be needed somewhere - -func initEntities() (err error) { - fsStore, err = filestore.SetupFSStore(*filesDir + "/files") - return err -} - -func initServer() { - -} - -func checkForDBConnection() { - retries := 0 - var err error - for retries < 600 { - err = datastore.GetStore().Open() - if err != nil { - time.Sleep(1 * time.Second) - retries++ - continue - } - break - } - - if err != nil { - Logger.Error("Error in opening the database. 
Shutting the server down") - panic(err) - } -} - -func processMinioConfig(reader io.Reader) error { +func setupMinioConfig(reader io.Reader) error { scanner := bufio.NewScanner(reader) more := scanner.Scan() if !more { @@ -193,26 +173,154 @@ func processMinioConfig(reader io.Reader) error { return nil } -func isValidOrigin(origin string) bool { - var url, err = url.Parse(origin) +func setupWorkers() { + var root = common.GetRootContext() + handler.SetupWorkers(root) + challenge.SetupWorkers(root) + readmarker.SetupWorkers(root) + writemarker.SetupWorkers(root) + allocation.StartUpdateWorker(root, + config.Configuration.UpdateAllocationsInterval) +} + +func setupDatabase() { + // check for database connection + for i := 600; i > 0; i-- { + time.Sleep(1 * time.Second) + if err := datastore.GetStore().Open(); err == nil { + if i == 1 { // no more attempts + Logger.Error("Failed to connect to the database. Shutting the server down") + panic(err) // fail + } + + return // success + } + } +} + +func setupOnChain() { + const ATTEMPT_DELAY = 60 * 1 // 1 minute + + // setup wallet + if err := handler.WalletRegister(); err != nil { + panic(err) + } + + // setup blobber (add or update) on the blockchain (multiple attempts) + for i := 10; i > 0; i-- { + if err := addOrUpdateOnChain(); err != nil { + if i == 1 { // no more attempts + panic(err) + } + } else { + break + } + + time.Sleep(ATTEMPT_DELAY * time.Second) + } + + setupWorkers() + + go healthCheckOnChainWorker() + + if config.Configuration.PriceInUSD { + go addOrUpdateOnChainWorker() + } +} + +func addOrUpdateOnChain() error { + txnHash, err := handler.BlobberAdd(common.GetRootContext()) + if err != nil { + return err + } + + if t, err := handler.TransactionVerify(txnHash); err != nil { + Logger.Error("Failed to verify blobber add/update transaction", zap.Any("err", err), zap.String("txn.Hash", txnHash)) + } else { + Logger.Info("Verified blobber add/update transaction", zap.String("txn_hash", t.Hash), 
zap.Any("txn_output", t.TransactionOutput)) + } + + return err +} + +func addOrUpdateOnChainWorker() { + var REPEAT_DELAY = 60 * 60 * time.Duration(viper.GetInt("price_worker_in_hours")) // 12 hours with default settings + for { + time.Sleep(REPEAT_DELAY * time.Second) + if err := addOrUpdateOnChain(); err != nil { + continue // pass // required by linting + } + } +} + +func healthCheckOnChain() error { + txnHash, err := handler.BlobberHealthCheck(common.GetRootContext()) if err != nil { - return false + if err == handler.ErrBlobberHasRemoved { + return nil + } else { + return err + } + } + + if t, err := handler.TransactionVerify(txnHash); err != nil { + Logger.Error("Failed to verify blobber health check", zap.Any("err", err), zap.String("txn.Hash", txnHash)) + } else { + Logger.Info("Verified blobber health check", zap.String("txn_hash", t.Hash), zap.Any("txn_output", t.TransactionOutput)) + } + + return err +} + +func healthCheckOnChainWorker() { + const REPEAT_DELAY = 60 * 15 // 15 minutes + + for { + time.Sleep(REPEAT_DELAY * time.Second) + if err := healthCheckOnChain(); err != nil { + continue // pass // required by linting + } } - var host = url.Hostname() - if host == "localhost" { - return true +} + +func setup(logDir string) error { + // init blockchain related stuff + zcncore.SetLogFile(logDir + "/0chainBlobber.log", false) + zcncore.SetLogLevel(3) + if err := zcncore.InitZCNSDK(serverChain.BlockWorker, config.Configuration.SignatureScheme); err != nil { + return err } - if host == "0chain.net" || host == "0box.io" || - strings.HasSuffix(host, ".0chain.net") || - strings.HasSuffix(host, ".alphanet-0chain.net") || - strings.HasSuffix(host, ".testnet-0chain.net") || - strings.HasSuffix(host, ".devnet-0chain.net") || - strings.HasSuffix(host, ".mainnet-0chain.net") { - return true + if err := zcncore.SetWalletInfo(node.Self.GetWalletString(), false); err != nil { + return err } - return false + + // setup on blockchain + go setupOnChain() + return nil } 
+// // Comment out to pass lint. Still keep this function around in case we want to +// // change how CORS validates origins. +// func isValidOrigin(origin string) bool { +// var url, err = url.Parse(origin) +// if err != nil { +// return false +// } +// var host = url.Hostname() +// if host == "localhost" { +// return true +// } +// if host == "0chain.net" || host == "0box.io" || +// strings.HasSuffix(host, ".0chain.net") || +// strings.HasSuffix(host, ".alphanet-0chain.net") || +// strings.HasSuffix(host, ".testnet-0chain.net") || +// strings.HasSuffix(host, ".devnet-0chain.net") || +// strings.HasSuffix(host, ".mainnet-0chain.net") { +// return true +// } +// return false +// } + func main() { deploymentMode := flag.Int("deployment_mode", 2, "deployment_mode") keysFile := flag.String("keys_file", "", "keys_file") @@ -238,7 +346,7 @@ func main() { } config.Configuration.ChainID = viper.GetString("server_chain.id") config.Configuration.SignatureScheme = viper.GetString("server_chain.signature_scheme") - SetupWorkerConfig() + setupWorkerConfig() if *filesDir == "" { panic("Please specify --files_dir absolute folder name option where uploaded files can be stored") @@ -273,7 +381,7 @@ func main() { panic(err) } - err = processMinioConfig(reader) + err = setupMinioConfig(reader) if err != nil { panic(err) } @@ -309,13 +417,13 @@ func main() { chain.SetServerChain(serverChain) - checkForDBConnection() + setupDatabase() // Initialize after server chain is setup. 
if err := initEntities(); err != nil { Logger.Error("Error setting up blobber on blockchian" + err.Error()) } - if err := SetupBlobberOnBC(*logDir); err != nil { + if err := setup(*logDir); err != nil { Logger.Error("Error setting up blobber on blockchian" + err.Error()) } mode := "main net" @@ -334,14 +442,18 @@ func main() { headersOk := handlers.AllowedHeaders([]string{ "X-Requested-With", "X-App-Client-ID", "X-App-Client-Key", "Content-Type", + "X-App-Client-Signature", }) - originsOk := handlers.AllowedOriginValidator(isValidOrigin) + + // Allow anybody to access API. + // originsOk := handlers.AllowedOriginValidator(isValidOrigin) + originsOk := handlers.AllowedOrigins([]string{"*"}) + methodsOk := handlers.AllowedMethods([]string{"GET", "HEAD", "POST", "PUT", "DELETE", "OPTIONS"}) rl := common.ConfigRateLimits() initHandlers(r) - initServer() grpcServer := handler.NewServerWithMiddlewares(rl) handler.RegisterGRPCServices(r, grpcServer) @@ -379,125 +491,3 @@ func main() { }(*grpcPortString) log.Fatal(server.ListenAndServe()) } - -func RegisterBlobber() { - - registrationRetries := 0 - //ctx := badgerdbstore.GetStorageProvider().WithConnection(common.GetRootContext()) - for registrationRetries < 10 { - txnHash, err := handler.RegisterBlobber(common.GetRootContext()) - time.Sleep(transaction.SLEEP_FOR_TXN_CONFIRMATION * time.Second) - txnVerified := false - verifyRetries := 0 - for verifyRetries < util.MAX_RETRIES { - time.Sleep(transaction.SLEEP_FOR_TXN_CONFIRMATION * time.Second) - t, err := transaction.VerifyTransaction(txnHash, chain.GetServerChain()) - if err == nil { - Logger.Info("Transaction for adding blobber accepted and verified", zap.String("txn_hash", t.Hash), zap.Any("txn_output", t.TransactionOutput)) - //badgerdbstore.GetStorageProvider().WriteBytes(ctx, BLOBBER_REGISTERED_LOOKUP_KEY, []byte(txnHash)) - //badgerdbstore.GetStorageProvider().Commit(ctx) - SetupWorkers() - go BlobberHealthCheck() - if config.Configuration.PriceInUSD { - go 
UpdateBlobberSettings() - } - return - } - verifyRetries++ - } - - if !txnVerified { - Logger.Error("Add blobber transaction could not be verified", zap.Any("err", err), zap.String("txn.Hash", txnHash)) - } - } -} - -func BlobberHealthCheck() { - const HEALTH_CHECK_TIMER = 60 * 15 // 15 Minutes - for { - txnHash, err := handler.BlobberHealthCheck(common.GetRootContext()) - if err != nil && err == handler.ErrBlobberHasRemoved { - time.Sleep(HEALTH_CHECK_TIMER * time.Second) - continue - } - time.Sleep(transaction.SLEEP_FOR_TXN_CONFIRMATION * time.Second) - txnVerified := false - verifyRetries := 0 - for verifyRetries < util.MAX_RETRIES { - time.Sleep(transaction.SLEEP_FOR_TXN_CONFIRMATION * time.Second) - t, err := transaction.VerifyTransaction(txnHash, chain.GetServerChain()) - if err == nil { - txnVerified = true - Logger.Info("Transaction for blobber health check verified", zap.String("txn_hash", t.Hash), zap.Any("txn_output", t.TransactionOutput)) - break - } - verifyRetries++ - } - - if !txnVerified { - Logger.Error("Blobber health check transaction could not be verified", zap.Any("err", err), zap.String("txn.Hash", txnHash)) - } - time.Sleep(HEALTH_CHECK_TIMER * time.Second) - } -} - -func UpdateBlobberSettings() { - var UPDATE_SETTINGS_TIMER = 60 * 60 * time.Duration(viper.GetInt("price_worker_in_hours")) - time.Sleep(UPDATE_SETTINGS_TIMER * time.Second) - for { - txnHash, err := handler.UpdateBlobberSettings(common.GetRootContext()) - if err != nil { - time.Sleep(UPDATE_SETTINGS_TIMER * time.Second) - continue - } - time.Sleep(transaction.SLEEP_FOR_TXN_CONFIRMATION * time.Second) - txnVerified := false - verifyRetries := 0 - for verifyRetries < util.MAX_RETRIES { - time.Sleep(transaction.SLEEP_FOR_TXN_CONFIRMATION * time.Second) - t, err := transaction.VerifyTransaction(txnHash, chain.GetServerChain()) - if err == nil { - txnVerified = true - Logger.Info("Transaction for blobber update settings verified", zap.String("txn_hash", t.Hash), zap.Any("txn_output", 
t.TransactionOutput)) - break - } - verifyRetries++ - } - - if !txnVerified { - Logger.Error("Blobber update settings transaction could not be verified", zap.Any("err", err), zap.String("txn.Hash", txnHash)) - } - time.Sleep(UPDATE_SETTINGS_TIMER * time.Second) - } -} - -func SetupBlobberOnBC(logDir string) error { - var logName = logDir + "/0chainBlobber.log" - zcncore.SetLogFile(logName, false) - zcncore.SetLogLevel(3) - if err := zcncore.InitZCNSDK(serverChain.BlockWorker, config.Configuration.SignatureScheme); err != nil { - return err - } - if err := zcncore.SetWalletInfo(node.Self.GetWalletString(), false); err != nil { - return err - } - go RegisterBlobber() - return nil -} - -/*HomePageHandler - provides basic info when accessing the home page of the server */ -func HomePageHandler(w http.ResponseWriter, r *http.Request) { - mc := chain.GetServerChain() - fmt.Fprintf(w, "
Running since %v ...\n", startTime) - fmt.Fprintf(w, "
Working on the chain: %v
\n", mc.ID) - fmt.Fprintf(w, "
I am a blobber with
  • id:%v
  • public_key:%v
  • build_tag:%v
\n", node.Self.ID, node.Self.PublicKey, build.BuildTag) - fmt.Fprintf(w, "
Miners ...\n") - network := zcncore.GetNetwork() - for _, miner := range network.Miners { - fmt.Fprintf(w, "%v\n", miner) - } - fmt.Fprintf(w, "
Sharders ...\n") - for _, sharder := range network.Sharders { - fmt.Fprintf(w, "%v\n", sharder) - } -} diff --git a/code/go/0chain.net/blobbercore/allocation/entity.go b/code/go/0chain.net/blobbercore/allocation/entity.go index 62a11125a..3070b4d8d 100644 --- a/code/go/0chain.net/blobbercore/allocation/entity.go +++ b/code/go/0chain.net/blobbercore/allocation/entity.go @@ -25,6 +25,8 @@ type Allocation struct { UsedSize int64 `gorm:"column:used_size"` OwnerID string `gorm:"column:owner_id"` OwnerPublicKey string `gorm:"column:owner_public_key"` + RepairerID string `gorm:"column:repairer_id"`// experimental / blobber node id + PayerID string `gorm:"column:payer_id"` // optional / client paying for all r/w ops Expiration common.Timestamp `gorm:"column:expiration_date"` AllocationRoot string `gorm:"column:allocation_root"` BlobberSize int64 `gorm:"column:blobber_size"` @@ -32,14 +34,12 @@ type Allocation struct { LatestRedeemedWM string `gorm:"column:latest_redeemed_write_marker"` IsRedeemRequired bool `gorm:"column:is_redeem_required"` TimeUnit time.Duration `gorm:"column:time_unit"` - // ending and cleaning - CleanedUp bool `gorm:"column:cleaned_up"` - Finalized bool `gorm:"column:finalized"` - // Has many terms. 
- Terms []*Terms `gorm:"-"` - - // Used for 3rd party/payer operations - PayerID string `gorm:"column:payer_id"` + IsImmutable bool `gorm:"is_immutable"` + // Ending and cleaning + CleanedUp bool `gorm:"column:cleaned_up"` + Finalized bool `gorm:"column:finalized"` + // Has many terms + Terms []*Terms `gorm:"-"` } func (Allocation) TableName() string { diff --git a/code/go/0chain.net/blobbercore/allocation/newfilechange.go b/code/go/0chain.net/blobbercore/allocation/newfilechange.go index 280e2cd6d..346ea89fb 100644 --- a/code/go/0chain.net/blobbercore/allocation/newfilechange.go +++ b/code/go/0chain.net/blobbercore/allocation/newfilechange.go @@ -32,6 +32,15 @@ type NewFileChange struct { EncryptedKey string `json:"encrypted_key,omitempty"` CustomMeta string `json:"custom_meta,omitempty"` Attributes reference.Attributes `json:"attributes,omitempty"` + + // IsResumable the request is resumable upload + IsResumable bool `json:"is_resumable,omitempty"` + // UploadLength indicates the size of the entire upload in bytes. The value MUST be a non-negative integer. + UploadLength int64 `json:"upload_length,omitempty"` + // Upload-Offset indicates a byte offset within a resource. The value MUST be a non-negative integer. 
+ UploadOffset int64 `json:"upload_offset,omitempty"` + // IsFinal the request is final chunk + IsFinal bool `json:"is_final,omitempty"` } func (nf *NewFileChange) ProcessChange(ctx context.Context, diff --git a/code/go/0chain.net/blobbercore/allocation/protocol.go b/code/go/0chain.net/blobbercore/allocation/protocol.go index 01c07bf26..9239ef701 100644 --- a/code/go/0chain.net/blobbercore/allocation/protocol.go +++ b/code/go/0chain.net/blobbercore/allocation/protocol.go @@ -122,11 +122,12 @@ func VerifyAllocationTransaction(ctx context.Context, allocationTx string, a.Expiration = sa.Expiration a.OwnerID = sa.OwnerID a.OwnerPublicKey = sa.OwnerPublicKey + a.RepairerID = t.ClientID // blobber node id a.TotalSize = sa.Size a.UsedSize = sa.UsedSize a.Finalized = sa.Finalized - a.PayerID = t.ClientID a.TimeUnit = sa.TimeUnit + a.IsImmutable = sa.IsImmutable // related terms a.Terms = make([]*Terms, 0, len(sa.BlobberDetails)) diff --git a/code/go/0chain.net/blobbercore/blobbergrpc/blobber.pb.go b/code/go/0chain.net/blobbercore/blobbergrpc/blobber.pb.go index 7f2063828..2fb934d7c 100644 --- a/code/go/0chain.net/blobbercore/blobbergrpc/blobber.pb.go +++ b/code/go/0chain.net/blobbercore/blobbergrpc/blobber.pb.go @@ -1435,17 +1435,18 @@ type Allocation struct { UsedSize int64 `protobuf:"varint,4,opt,name=UsedSize,proto3" json:"UsedSize,omitempty"` OwnerID string `protobuf:"bytes,5,opt,name=OwnerID,proto3" json:"OwnerID,omitempty"` OwnerPublicKey string `protobuf:"bytes,6,opt,name=OwnerPublicKey,proto3" json:"OwnerPublicKey,omitempty"` - Expiration int64 `protobuf:"varint,7,opt,name=Expiration,proto3" json:"Expiration,omitempty"` - AllocationRoot string `protobuf:"bytes,8,opt,name=AllocationRoot,proto3" json:"AllocationRoot,omitempty"` - BlobberSize int64 `protobuf:"varint,9,opt,name=BlobberSize,proto3" json:"BlobberSize,omitempty"` - BlobberSizeUsed int64 `protobuf:"varint,10,opt,name=BlobberSizeUsed,proto3" json:"BlobberSizeUsed,omitempty"` - LatestRedeemedWM string 
`protobuf:"bytes,11,opt,name=LatestRedeemedWM,proto3" json:"LatestRedeemedWM,omitempty"` - IsRedeemRequired bool `protobuf:"varint,12,opt,name=IsRedeemRequired,proto3" json:"IsRedeemRequired,omitempty"` - TimeUnit int64 `protobuf:"varint,13,opt,name=TimeUnit,proto3" json:"TimeUnit,omitempty"` - CleanedUp bool `protobuf:"varint,14,opt,name=CleanedUp,proto3" json:"CleanedUp,omitempty"` - Finalized bool `protobuf:"varint,15,opt,name=Finalized,proto3" json:"Finalized,omitempty"` - Terms []*Term `protobuf:"bytes,16,rep,name=Terms,proto3" json:"Terms,omitempty"` - PayerID string `protobuf:"bytes,17,opt,name=PayerID,proto3" json:"PayerID,omitempty"` + RepairerID string `protobuf:"bytes,7,opt,name=RepairerID,proto3" json:"RepairerID,omitempty"` + PayerID string `protobuf:"bytes,8,opt,name=PayerID,proto3" json:"PayerID,omitempty"` + Expiration int64 `protobuf:"varint,9,opt,name=Expiration,proto3" json:"Expiration,omitempty"` + AllocationRoot string `protobuf:"bytes,10,opt,name=AllocationRoot,proto3" json:"AllocationRoot,omitempty"` + BlobberSize int64 `protobuf:"varint,11,opt,name=BlobberSize,proto3" json:"BlobberSize,omitempty"` + BlobberSizeUsed int64 `protobuf:"varint,12,opt,name=BlobberSizeUsed,proto3" json:"BlobberSizeUsed,omitempty"` + LatestRedeemedWM string `protobuf:"bytes,13,opt,name=LatestRedeemedWM,proto3" json:"LatestRedeemedWM,omitempty"` + IsRedeemRequired bool `protobuf:"varint,14,opt,name=IsRedeemRequired,proto3" json:"IsRedeemRequired,omitempty"` + TimeUnit int64 `protobuf:"varint,15,opt,name=TimeUnit,proto3" json:"TimeUnit,omitempty"` + CleanedUp bool `protobuf:"varint,16,opt,name=CleanedUp,proto3" json:"CleanedUp,omitempty"` + Finalized bool `protobuf:"varint,17,opt,name=Finalized,proto3" json:"Finalized,omitempty"` + Terms []*Term `protobuf:"bytes,18,rep,name=Terms,proto3" json:"Terms,omitempty"` } func (x *Allocation) Reset() { @@ -1522,6 +1523,20 @@ func (x *Allocation) GetOwnerPublicKey() string { return "" } +func (x *Allocation) GetRepairerID() 
string { + if x != nil { + return x.RepairerID + } + return "" +} + +func (x *Allocation) GetPayerID() string { + if x != nil { + return x.PayerID + } + return "" +} + func (x *Allocation) GetExpiration() int64 { if x != nil { return x.Expiration @@ -1592,13 +1607,6 @@ func (x *Allocation) GetTerms() []*Term { return nil } -func (x *Allocation) GetPayerID() string { - if x != nil { - return x.PayerID - } - return "" -} - type Term struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -2319,7 +2327,7 @@ var file_blobber_proto_rawDesc = []byte{ 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x62, 0x6c, 0x6f, 0x62, 0x62, 0x65, 0x72, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x61, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x22, 0xb6, 0x04, 0x0a, 0x0a, 0x41, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x6e, 0x22, 0xd6, 0x04, 0x0a, 0x0a, 0x41, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0e, 0x0a, 0x02, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x49, 0x44, 0x12, 0x0e, 0x0a, 0x02, 0x54, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x54, 0x78, 0x12, 0x1c, 0x0a, 0x09, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x53, 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, @@ -2330,31 +2338,33 @@ var file_blobber_proto_rawDesc = []byte{ 0x65, 0x72, 0x49, 0x44, 0x12, 0x26, 0x0a, 0x0e, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x1e, 0x0a, 0x0a, - 0x45, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, - 0x52, 0x0a, 0x45, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x26, 0x0a, 0x0e, - 0x41, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x6f, 0x6f, 0x74, 0x18, 0x08, - 0x20, 0x01, 
0x28, 0x09, 0x52, 0x0e, 0x41, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x42, 0x6c, 0x6f, 0x62, 0x62, 0x65, 0x72, 0x53, - 0x69, 0x7a, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x42, 0x6c, 0x6f, 0x62, 0x62, - 0x65, 0x72, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x28, 0x0a, 0x0f, 0x42, 0x6c, 0x6f, 0x62, 0x62, 0x65, - 0x72, 0x53, 0x69, 0x7a, 0x65, 0x55, 0x73, 0x65, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x03, 0x52, - 0x0f, 0x42, 0x6c, 0x6f, 0x62, 0x62, 0x65, 0x72, 0x53, 0x69, 0x7a, 0x65, 0x55, 0x73, 0x65, 0x64, - 0x12, 0x2a, 0x0a, 0x10, 0x4c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x52, 0x65, 0x64, 0x65, 0x65, 0x6d, - 0x65, 0x64, 0x57, 0x4d, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x4c, 0x61, 0x74, 0x65, - 0x73, 0x74, 0x52, 0x65, 0x64, 0x65, 0x65, 0x6d, 0x65, 0x64, 0x57, 0x4d, 0x12, 0x2a, 0x0a, 0x10, - 0x49, 0x73, 0x52, 0x65, 0x64, 0x65, 0x65, 0x6d, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, - 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x49, 0x73, 0x52, 0x65, 0x64, 0x65, 0x65, 0x6d, - 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x54, 0x69, 0x6d, 0x65, - 0x55, 0x6e, 0x69, 0x74, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x54, 0x69, 0x6d, 0x65, - 0x55, 0x6e, 0x69, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x43, 0x6c, 0x65, 0x61, 0x6e, 0x65, 0x64, 0x55, - 0x70, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x43, 0x6c, 0x65, 0x61, 0x6e, 0x65, 0x64, - 0x55, 0x70, 0x12, 0x1c, 0x0a, 0x09, 0x46, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x18, - 0x0f, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x46, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, - 0x12, 0x2e, 0x0a, 0x05, 0x54, 0x65, 0x72, 0x6d, 0x73, 0x18, 0x10, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x18, 0x2e, 0x62, 0x6c, 0x6f, 0x62, 0x62, 0x65, 0x72, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x65, 0x72, 0x6d, 0x52, 0x05, 0x54, 0x65, 0x72, 0x6d, 0x73, - 0x12, 0x18, 0x0a, 0x07, 0x50, 0x61, 0x79, 0x65, 
0x72, 0x49, 0x44, 0x18, 0x11, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x07, 0x50, 0x61, 0x79, 0x65, 0x72, 0x49, 0x44, 0x22, 0x96, 0x01, 0x0a, 0x04, 0x54, + 0x52, 0x65, 0x70, 0x61, 0x69, 0x72, 0x65, 0x72, 0x49, 0x44, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0a, 0x52, 0x65, 0x70, 0x61, 0x69, 0x72, 0x65, 0x72, 0x49, 0x44, 0x12, 0x18, 0x0a, 0x07, + 0x50, 0x61, 0x79, 0x65, 0x72, 0x49, 0x44, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x50, + 0x61, 0x79, 0x65, 0x72, 0x49, 0x44, 0x12, 0x1e, 0x0a, 0x0a, 0x45, 0x78, 0x70, 0x69, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x45, 0x78, 0x70, 0x69, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x26, 0x0a, 0x0e, 0x41, 0x6c, 0x6c, 0x6f, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x6f, 0x6f, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, + 0x41, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x20, + 0x0a, 0x0b, 0x42, 0x6c, 0x6f, 0x62, 0x62, 0x65, 0x72, 0x53, 0x69, 0x7a, 0x65, 0x18, 0x0b, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x0b, 0x42, 0x6c, 0x6f, 0x62, 0x62, 0x65, 0x72, 0x53, 0x69, 0x7a, 0x65, + 0x12, 0x28, 0x0a, 0x0f, 0x42, 0x6c, 0x6f, 0x62, 0x62, 0x65, 0x72, 0x53, 0x69, 0x7a, 0x65, 0x55, + 0x73, 0x65, 0x64, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0f, 0x42, 0x6c, 0x6f, 0x62, 0x62, + 0x65, 0x72, 0x53, 0x69, 0x7a, 0x65, 0x55, 0x73, 0x65, 0x64, 0x12, 0x2a, 0x0a, 0x10, 0x4c, 0x61, + 0x74, 0x65, 0x73, 0x74, 0x52, 0x65, 0x64, 0x65, 0x65, 0x6d, 0x65, 0x64, 0x57, 0x4d, 0x18, 0x0d, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x4c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x52, 0x65, 0x64, 0x65, + 0x65, 0x6d, 0x65, 0x64, 0x57, 0x4d, 0x12, 0x2a, 0x0a, 0x10, 0x49, 0x73, 0x52, 0x65, 0x64, 0x65, + 0x65, 0x6d, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x10, 0x49, 0x73, 0x52, 0x65, 0x64, 0x65, 0x65, 0x6d, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, + 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x54, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x74, 
0x18, 0x0f, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x54, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x74, 0x12, 0x1c, + 0x0a, 0x09, 0x43, 0x6c, 0x65, 0x61, 0x6e, 0x65, 0x64, 0x55, 0x70, 0x18, 0x10, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x09, 0x43, 0x6c, 0x65, 0x61, 0x6e, 0x65, 0x64, 0x55, 0x70, 0x12, 0x1c, 0x0a, 0x09, + 0x46, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x09, 0x46, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x05, 0x54, 0x65, + 0x72, 0x6d, 0x73, 0x18, 0x12, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x62, 0x6c, 0x6f, 0x62, + 0x62, 0x65, 0x72, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x54, + 0x65, 0x72, 0x6d, 0x52, 0x05, 0x54, 0x65, 0x72, 0x6d, 0x73, 0x22, 0x96, 0x01, 0x0a, 0x04, 0x54, 0x65, 0x72, 0x6d, 0x12, 0x0e, 0x0a, 0x02, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x49, 0x44, 0x12, 0x1c, 0x0a, 0x09, 0x42, 0x6c, 0x6f, 0x62, 0x62, 0x65, 0x72, 0x49, 0x44, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x42, 0x6c, 0x6f, 0x62, 0x62, 0x65, 0x72, 0x49, diff --git a/code/go/0chain.net/blobbercore/blobbergrpc/proto/blobber.proto b/code/go/0chain.net/blobbercore/blobbergrpc/proto/blobber.proto index 449b3d8c3..0fb9e7e3a 100644 --- a/code/go/0chain.net/blobbercore/blobbergrpc/proto/blobber.proto +++ b/code/go/0chain.net/blobbercore/blobbergrpc/proto/blobber.proto @@ -184,17 +184,18 @@ message Allocation { int64 UsedSize = 4; string OwnerID = 5; string OwnerPublicKey = 6; - int64 Expiration = 7; - string AllocationRoot = 8; - int64 BlobberSize = 9; - int64 BlobberSizeUsed = 10; - string LatestRedeemedWM = 11; - bool IsRedeemRequired = 12; - int64 TimeUnit = 13; - bool CleanedUp = 14; - bool Finalized = 15; - repeated Term Terms = 16; - string PayerID = 17; + string RepairerID = 7; + string PayerID = 8; + int64 Expiration = 9; + string AllocationRoot = 10; + int64 BlobberSize = 11; + int64 BlobberSizeUsed = 12; + string LatestRedeemedWM = 13; + bool 
IsRedeemRequired = 14; + int64 TimeUnit = 15; + bool CleanedUp = 16; + bool Finalized = 17; + repeated Term Terms = 18; } message Term { diff --git a/code/go/0chain.net/blobbercore/challenge/entity.go b/code/go/0chain.net/blobbercore/challenge/entity.go index 283c90f46..c47fc7fc3 100644 --- a/code/go/0chain.net/blobbercore/challenge/entity.go +++ b/code/go/0chain.net/blobbercore/challenge/entity.go @@ -75,6 +75,7 @@ type ChallengeEntity struct { ValidationTickets []*ValidationTicket `json:"validation_tickets" gorm:"-"` ObjectPathString datatypes.JSON `json:"-" gorm:"column:object_path"` ObjectPath *reference.ObjectPath `json:"object_path" gorm:"-"` + Created common.Timestamp `json:"created" gorm:"-"` } func (ChallengeEntity) TableName() string { diff --git a/code/go/0chain.net/blobbercore/challenge/worker.go b/code/go/0chain.net/blobbercore/challenge/worker.go index 7fca5bc7e..1c394d97c 100644 --- a/code/go/0chain.net/blobbercore/challenge/worker.go +++ b/code/go/0chain.net/blobbercore/challenge/worker.go @@ -48,7 +48,6 @@ func SubmitProcessedChallenges(ctx context.Context) error { case <-ctx.Done(): return ctx.Err() default: - Logger.Info("Attempting to commit processed challenges...") rctx := datastore.GetStore().CreateTransaction(ctx) db := datastore.GetStore().GetTransaction(rctx) //lastChallengeRedeemed := &ChallengeEntity{} @@ -199,18 +198,22 @@ func FindChallenges(ctx context.Context) { params := make(map[string]string) params["blobber"] = node.Self.ID + var blobberChallenges BCChallengeResponse blobberChallenges.Challenges = make([]*ChallengeEntity, 0) retBytes, err := transaction.MakeSCRestAPICall(transaction.STORAGE_CONTRACT_ADDRESS, "/openchallenges", params, chain.GetServerChain(), nil) + if err != nil { Logger.Error("Error getting the open challenges from the blockchain", zap.Error(err)) } else { tCtx := datastore.GetStore().CreateTransaction(ctx) db := datastore.GetStore().GetTransaction(tCtx) bytesReader := bytes.NewBuffer(retBytes) + d := 
json.NewDecoder(bytesReader) d.UseNumber() errd := d.Decode(&blobberChallenges) + if errd != nil { Logger.Error("Error in unmarshal of the sharder response", zap.Error(errd)) } else { @@ -219,8 +222,10 @@ func FindChallenges(ctx context.Context) { Logger.Info("No challenge entity from the challenge map") continue } + challengeObj := v _, err := GetChallengeEntity(tCtx, challengeObj.ChallengeID) + if errors.Is(err, gorm.ErrRecordNotFound) { latestChallenge, err := GetLastChallengeEntity(tCtx) if err == nil || errors.Is(err, gorm.ErrRecordNotFound) { diff --git a/code/go/0chain.net/blobbercore/config/config.go b/code/go/0chain.net/blobbercore/config/config.go index bf95a8d8f..2830bda9d 100644 --- a/code/go/0chain.net/blobbercore/config/config.go +++ b/code/go/0chain.net/blobbercore/config/config.go @@ -63,6 +63,11 @@ const ( DeploymentMainNet = 2 ) +type GeolocationConfig struct { + Latitude float64 `mapstructure:"latitude"` + Longitude float64 `mapstructure:"longitude"` +} + type Config struct { *config.Config DBHost string @@ -118,6 +123,8 @@ type Config struct { NumDelegates int `json:"num_delegates"` // ServiceCharge for blobber. 
ServiceCharge float64 `json:"service_charge"` + + Geolocation GeolocationConfig `mapstructure:"geolocation"` } /*Configuration of the system */ @@ -133,6 +140,17 @@ func Development() bool { return Configuration.DeploymentMode == DeploymentDevelopment } +// get validated geolocatiion +func Geolocation() GeolocationConfig { + g := Configuration.Geolocation + if g.Latitude > 90.00 || g.Latitude < -90.00 || + g.Longitude > 180.00 || g.Longitude < -180.00 { + panic("Fatal error in config file") + + } + return g +} + /*ErrSupportedChain error for indicating which chain is supported by the server */ var ErrSupportedChain error diff --git a/code/go/0chain.net/blobbercore/filestore/chunk_writer.go b/code/go/0chain.net/blobbercore/filestore/chunk_writer.go new file mode 100644 index 000000000..e1e027f03 --- /dev/null +++ b/code/go/0chain.net/blobbercore/filestore/chunk_writer.go @@ -0,0 +1,115 @@ +package filestore + +import ( + "context" + "errors" + "io" + "os" +) + +//ChunkWriter implements a chunk write that will append content to the file +type ChunkWriter struct { + file string + writer *os.File + reader *os.File + offset int64 + size int64 +} + +//NewChunkWriter create a ChunkWriter +func NewChunkWriter(file string) (*ChunkWriter, error) { + w := &ChunkWriter{ + file: file, + } + var f *os.File + fi, err := os.Stat(file) + if errors.Is(err, os.ErrNotExist) { + f, err = os.Create(file) + if err != nil { + return nil, err + } + } else { + f, err = os.OpenFile(file, os.O_RDONLY|os.O_CREATE|os.O_WRONLY, os.ModeAppend) + if err != nil { + return nil, err + } + + w.size = fi.Size() + w.offset = fi.Size() + } + + w.writer = f + + return w, nil +} + +//Write implements io.Writer +func (w *ChunkWriter) Write(b []byte) (n int, err error) { + if w == nil || w.writer == nil { + return 0, os.ErrNotExist + } + + written, err := w.writer.Write(b) + + w.size += int64(written) + + return written, err +} + +//Reader implements io.Reader +func (w *ChunkWriter) Read(p []byte) (n int, 
err error) { + if w == nil || w.reader == nil { + reader, err := os.Open(w.file) + + if err != nil { + return 0, err + } + + w.reader = reader + } + + return w.reader.Read(p) +} + +//WriteChunk append data to the file +func (w *ChunkWriter) WriteChunk(ctx context.Context, offset int64, src io.Reader) (int64, error) { + if w == nil || w.writer == nil { + return 0, os.ErrNotExist + } + + _, err := w.writer.Seek(offset, io.SeekStart) + + if err != nil { + return 0, err + } + + n, err := io.Copy(w.writer, src) + + w.offset += n + w.size += n + + return n, err +} + +//Size length in bytes for regular files +func (w *ChunkWriter) Size() int64 { + if w == nil { + return 0 + } + return w.size +} + +//Close closes the underline File +func (w *ChunkWriter) Close() { + if w == nil { + return + } + + if w.writer != nil { + w.writer.Close() + } + + if w.reader != nil { + w.reader.Close() + } +} diff --git a/code/go/0chain.net/blobbercore/filestore/chunk_writer_test.go b/code/go/0chain.net/blobbercore/filestore/chunk_writer_test.go new file mode 100644 index 000000000..716dc3189 --- /dev/null +++ b/code/go/0chain.net/blobbercore/filestore/chunk_writer_test.go @@ -0,0 +1,93 @@ +package filestore + +import ( + "context" + "io/ioutil" + "os" + "path/filepath" + "strconv" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestWrite(t *testing.T) { + + fileName := filepath.Join(os.TempDir(), "testwrite_"+strconv.FormatInt(time.Now().Unix(), 10)) + + content := "this is full content" + + w, err := NewChunkWriter(fileName) + if err != nil { + require.Error(t, err, "failed to create ChunkWriter") + return + } + + _, err = w.Write([]byte(content)) + + if err != nil { + require.Error(t, err, "failed to ChunkWriter.WriteChunk") + return + } + + buf := make([]byte, w.Size()) + + //read all lines from file + _, err = w.Read(buf) + if err != nil { + require.Error(t, err, "failed to ChunkWriter.Read") + return + } + + 
assert.Equal(t, content, string(buf), "File content should be same") +} + +func TestWriteChunk(t *testing.T) { + + chunk1 := "this is 1st chunked" + + tempFile, err := ioutil.TempFile("", "") + + if err != nil { + require.Error(t, err, "failed to create tempfile") + return + } + offset, err := tempFile.Write([]byte(chunk1)) + if err != nil { + require.Error(t, err, "failed to write first chunk to tempfile") + return + } + + fileName := tempFile.Name() + tempFile.Close() + + w, err := NewChunkWriter(fileName) + if err != nil { + require.Error(t, err, "failed to create ChunkWriter") + return + } + defer w.Close() + + chunk2 := "this is 2nd chunk" + + _, err = w.WriteChunk(context.TODO(), int64(offset), strings.NewReader(chunk2)) + + if err != nil { + require.Error(t, err, "failed to ChunkWriter.WriteChunk") + return + } + + buf := make([]byte, w.Size()) + + //read all lines from file + _, err = w.Read(buf) + if err != nil { + require.Error(t, err, "failed to ChunkWriter.Read") + return + } + + assert.Equal(t, chunk1+chunk2, string(buf), "File content should be same") +} diff --git a/code/go/0chain.net/blobbercore/filestore/fs_store.go b/code/go/0chain.net/blobbercore/filestore/fs_store.go index 39c99426c..1ab9be0de 100644 --- a/code/go/0chain.net/blobbercore/filestore/fs_store.go +++ b/code/go/0chain.net/blobbercore/filestore/fs_store.go @@ -2,12 +2,14 @@ package filestore import ( "bytes" + "context" "crypto/sha1" "encoding/hex" "encoding/json" "fmt" "hash" "io" + "io/ioutil" "mime/multipart" "os" "path/filepath" @@ -503,21 +505,43 @@ func (fs *FileFSStore) WriteFile(allocationID string, fileData *FileInputData, return nil, common.NewError("filestore_setup_error", "Error setting the fs store. 
"+err.Error()) } - h := sha1.New() tempFilePath := fs.generateTempPath(allocation, fileData, connectionID) - dest, err := os.Create(tempFilePath) + dest, err := NewChunkWriter(tempFilePath) if err != nil { return nil, common.NewError("file_creation_error", err.Error()) } defer dest.Close() - // infile, err := hdr.Open() - // if err != nil { - // return nil, common.NewError("file_reading_error", err.Error()) - // } + + fileRef := &FileOutputData{} + var fileReader io.Reader = infile + + if fileData.IsResumable { + h := sha1.New() + offset, err := dest.WriteChunk(context.TODO(), fileData.UploadOffset, io.TeeReader(fileReader, h)) + + if err != nil { + return nil, common.NewError("file_write_error", err.Error()) + } + + fileRef.ContentHash = hex.EncodeToString(h.Sum(nil)) + fileRef.Size = dest.Size() + fileRef.Name = fileData.Name + fileRef.Path = fileData.Path + fileRef.UploadOffset = fileData.UploadOffset + offset + fileRef.UploadLength = fileData.UploadLength + + if !fileData.IsFinal { + //skip to compute hash until the last chunk is uploaded + return fileRef, nil + } + + fileReader = dest + } + + h := sha1.New() bytesBuffer := bytes.NewBuffer(nil) - //merkleHash := sha3.New256() multiHashWriter := io.MultiWriter(h, bytesBuffer) - tReader := io.TeeReader(infile, multiHashWriter) + tReader := io.TeeReader(fileReader, multiHashWriter) merkleHashes := make([]hash.Hash, 1024) merkleLeaves := make([]util.Hashable, 1024) for idx := range merkleHashes { @@ -525,7 +549,15 @@ func (fs *FileFSStore) WriteFile(allocationID string, fileData *FileInputData, } fileSize := int64(0) for { - written, err := io.CopyN(dest, tReader, CHUNK_SIZE) + var written int64 + + if fileData.IsResumable { + //all chunks have been written, only read bytes from local file , and compute hash + written, err = io.CopyN(ioutil.Discard, tReader, CHUNK_SIZE) + } else { + written, err = io.CopyN(dest, tReader, CHUNK_SIZE) + } + if err != io.EOF && err != nil { return nil, 
common.NewError("file_write_error", err.Error()) } @@ -541,7 +573,6 @@ func (fs *FileFSStore) WriteFile(allocationID string, fileData *FileInputData, merkleHashes[offset].Write(dataBytes[i:end]) } - // merkleLeaves = append(merkleLeaves, util.NewStringHashable(hex.EncodeToString(merkleHash.Sum(nil)))) bytesBuffer.Reset() if err != nil && err == io.EOF { break @@ -550,17 +581,21 @@ func (fs *FileFSStore) WriteFile(allocationID string, fileData *FileInputData, for idx := range merkleHashes { merkleLeaves[idx] = util.NewStringHashable(hex.EncodeToString(merkleHashes[idx].Sum(nil))) } - //Logger.Info("File size", zap.Int64("file_size", fileSize)) + var mt util.MerkleTreeI = &util.MerkleTree{} mt.ComputeTree(merkleLeaves) - //Logger.Info("Calculated Merkle root", zap.String("merkle_root", mt.GetRoot()), zap.Int("merkle_leaf_count", len(merkleLeaves))) - fileRef := &FileOutputData{} - fileRef.ContentHash = hex.EncodeToString(h.Sum(nil)) + //only update hash for whole file when it is not a resumable upload or is final chunk. + if !fileData.IsResumable || fileData.IsFinal { + fileRef.ContentHash = hex.EncodeToString(h.Sum(nil)) + } + fileRef.Size = fileSize fileRef.Name = fileData.Name fileRef.Path = fileData.Path fileRef.MerkleRoot = mt.GetRoot() + fileRef.UploadOffset = fileSize + fileRef.UploadLength = fileData.UploadLength return fileRef, nil } diff --git a/code/go/0chain.net/blobbercore/filestore/store.go b/code/go/0chain.net/blobbercore/filestore/store.go index 3e2b718fa..6980b4346 100644 --- a/code/go/0chain.net/blobbercore/filestore/store.go +++ b/code/go/0chain.net/blobbercore/filestore/store.go @@ -14,6 +14,15 @@ type FileInputData struct { Path string Hash string OnCloud bool + + //IsResumable the request is resumable upload + IsResumable bool + //UploadLength indicates the size of the entire upload in bytes. The value MUST be a non-negative integer. + UploadLength int64 + //Upload-Offset indicates a byte offset within a resource. 
The value MUST be a non-negative integer. + UploadOffset int64 + //IsFinal the request is final chunk + IsFinal bool } type FileOutputData struct { @@ -22,6 +31,11 @@ type FileOutputData struct { MerkleRoot string ContentHash string Size int64 + + //UploadLength indicates the size of the entire upload in bytes. The value MUST be a non-negative integer. + UploadLength int64 + //Upload-Offset indicates a byte offset within a resource. The value MUST be a non-negative integer. + UploadOffset int64 } type FileObjectHandler func(contentHash string, contentSize int64) diff --git a/code/go/0chain.net/blobbercore/handler/convert.go b/code/go/0chain.net/blobbercore/handler/convert.go index bc5f699f5..a61b61b31 100644 --- a/code/go/0chain.net/blobbercore/handler/convert.go +++ b/code/go/0chain.net/blobbercore/handler/convert.go @@ -25,6 +25,8 @@ func AllocationToGRPCAllocation(alloc *allocation.Allocation) *blobbergrpc.Alloc UsedSize: alloc.UsedSize, OwnerID: alloc.OwnerID, OwnerPublicKey: alloc.OwnerPublicKey, + RepairerID: alloc.RepairerID, + PayerID: alloc.PayerID, Expiration: int64(alloc.Expiration), AllocationRoot: alloc.AllocationRoot, BlobberSize: alloc.BlobberSize, @@ -35,7 +37,6 @@ func AllocationToGRPCAllocation(alloc *allocation.Allocation) *blobbergrpc.Alloc CleanedUp: alloc.CleanedUp, Finalized: alloc.Finalized, Terms: terms, - PayerID: alloc.PayerID, } } diff --git a/code/go/0chain.net/blobbercore/handler/dto.go b/code/go/0chain.net/blobbercore/handler/dto.go index 2edcf2b26..98bc27aee 100644 --- a/code/go/0chain.net/blobbercore/handler/dto.go +++ b/code/go/0chain.net/blobbercore/handler/dto.go @@ -12,6 +12,11 @@ type UploadResult struct { Size int64 `json:"size"` Hash string `json:"content_hash"` MerkleRoot string `json:"merkle_root"` + + //UploadLength indicates the size of the entire upload in bytes. The value MUST be a non-negative integer. + UploadLength int64 `json:"upload_length"` + //Upload-Offset indicates a byte offset within a resource. 
The value MUST be a non-negative integer. + UploadOffset int64 `json:"upload_offset"` } type CommitResult struct { diff --git a/code/go/0chain.net/blobbercore/handler/grpc_handler.go b/code/go/0chain.net/blobbercore/handler/grpc_handler.go index dfb361511..8a1f23ff4 100644 --- a/code/go/0chain.net/blobbercore/handler/grpc_handler.go +++ b/code/go/0chain.net/blobbercore/handler/grpc_handler.go @@ -44,11 +44,10 @@ func (b *blobberGRPCService) GetAllocation(ctx context.Context, request *blobber func (b *blobberGRPCService) GetFileMetaData(ctx context.Context, req *blobbergrpc.GetFileMetaDataRequest) (*blobbergrpc.GetFileMetaDataResponse, error) { logger := ctxzap.Extract(ctx) - allocationObj, err := b.storageHandler.verifyAllocation(ctx, req.Allocation, true) + alloc, err := b.storageHandler.verifyAllocation(ctx, req.Allocation, true) if err != nil { return nil, common.NewError("invalid_parameters", "Invalid allocation id passed."+err.Error()) } - allocationID := allocationObj.ID clientID := req.Context.Client if len(clientID) == 0 { @@ -61,10 +60,10 @@ func (b *blobberGRPCService) GetFileMetaData(ctx context.Context, req *blobbergr if len(path) == 0 { return nil, common.NewError("invalid_parameters", "Invalid path") } - path_hash = reference.GetReferenceLookup(allocationID, path) + path_hash = reference.GetReferenceLookup(alloc.ID, path) } - fileref, err := b.packageHandler.GetReferenceFromLookupHash(ctx, allocationID, path_hash) + fileref, err := b.packageHandler.GetReferenceFromLookupHash(ctx, alloc.ID, path_hash) if err != nil { return nil, common.NewError("invalid_parameters", "Invalid file path. 
"+err.Error()) } @@ -84,18 +83,22 @@ func (b *blobberGRPCService) GetFileMetaData(ctx context.Context, req *blobbergr logger.Error("Failed to get collaborators from refID", zap.Error(err), zap.Any("ref_id", fileref.ID)) } - authTokenString := req.AuthToken - - if (allocationObj.OwnerID != clientID && - allocationObj.PayerID != clientID && - !b.packageHandler.IsACollaborator(ctx, fileref.ID, clientID)) || len(authTokenString) > 0 { - authTicketVerified, err := b.storageHandler.verifyAuthTicket(ctx, req.AuthToken, allocationObj, fileref, clientID) - if err != nil { - return nil, err - } - if !authTicketVerified { - return nil, common.NewError("auth_ticket_verification_failed", "Could not verify the auth ticket.") + // authorize file access + var ( + isOwner = clientID == alloc.OwnerID + isRepairer = clientID == alloc.RepairerID + isCollaborator = b.packageHandler.IsACollaborator(ctx, fileref.ID, clientID) + ) + + if !isOwner && !isRepairer && !isCollaborator { + // check auth token + if isAuthorized, err := b.storageHandler.verifyAuthTicket(ctx, + req.AuthToken, alloc, fileref, clientID, + ); !isAuthorized { + return nil, common.NewErrorf("download_file", + "cannot verify auth ticket: %v", err) } + fileref.Path = "" } @@ -116,15 +119,14 @@ func (b *blobberGRPCService) GetFileMetaData(ctx context.Context, req *blobbergr func (b *blobberGRPCService) GetFileStats(ctx context.Context, req *blobbergrpc.GetFileStatsRequest) (*blobbergrpc.GetFileStatsResponse, error) { allocationTx := req.Context.Allocation - allocationObj, err := b.storageHandler.verifyAllocation(ctx, allocationTx, true) + alloc, err := b.storageHandler.verifyAllocation(ctx, allocationTx, true) if err != nil { return nil, common.NewError("invalid_parameters", "Invalid allocation id passed."+err.Error()) } - allocationID := allocationObj.ID clientID := req.Context.Client - if len(clientID) == 0 || allocationObj.OwnerID != clientID { + if len(clientID) == 0 || alloc.OwnerID != clientID { return nil, 
common.NewError("invalid_operation", "Operation needs to be performed by the owner of the allocation") } @@ -134,10 +136,10 @@ func (b *blobberGRPCService) GetFileStats(ctx context.Context, req *blobbergrpc. if len(path) == 0 { return nil, common.NewError("invalid_parameters", "Invalid path") } - path_hash = reference.GetReferenceLookup(allocationID, path) + path_hash = reference.GetReferenceLookup(alloc.ID, path) } - fileref, err := b.packageHandler.GetReferenceFromLookupHash(ctx, allocationID, path_hash) + fileref, err := b.packageHandler.GetReferenceFromLookupHash(ctx, alloc.ID, path_hash) if err != nil { return nil, common.NewError("invalid_parameters", "Invalid file path. "+err.Error()) @@ -164,12 +166,11 @@ func (b *blobberGRPCService) ListEntities(ctx context.Context, req *blobbergrpc. clientID := req.Context.Client allocationTx := req.Context.Allocation - allocationObj, err := b.storageHandler.verifyAllocation(ctx, allocationTx, true) + alloc, err := b.storageHandler.verifyAllocation(ctx, allocationTx, true) if err != nil { return nil, common.NewError("invalid_parameters", "Invalid allocation id passed."+err.Error()) } - allocationID := allocationObj.ID if len(clientID) == 0 { return nil, common.NewError("invalid_operation", "Operation needs to be performed by the owner of the allocation") @@ -181,18 +182,18 @@ func (b *blobberGRPCService) ListEntities(ctx context.Context, req *blobbergrpc. if len(path) == 0 { return nil, common.NewError("invalid_parameters", "Invalid path") } - path_hash = reference.GetReferenceLookup(allocationID, path) + path_hash = reference.GetReferenceLookup(alloc.ID, path) } logger.Info("Path Hash for list dir :" + path_hash) - fileref, err := b.packageHandler.GetReferenceFromLookupHash(ctx, allocationID, path_hash) + fileref, err := b.packageHandler.GetReferenceFromLookupHash(ctx, alloc.ID, path_hash) if err != nil { return nil, common.NewError("invalid_parameters", "Invalid path. 
"+err.Error()) } authTokenString := req.AuthToken - if clientID != allocationObj.OwnerID || len(authTokenString) > 0 { - authTicketVerified, err := b.storageHandler.verifyAuthTicket(ctx, authTokenString, allocationObj, fileref, clientID) + if clientID != alloc.OwnerID || len(authTokenString) > 0 { + authTicketVerified, err := b.storageHandler.verifyAuthTicket(ctx, authTokenString, alloc, fileref, clientID) if err != nil { return nil, err } @@ -201,18 +202,18 @@ func (b *blobberGRPCService) ListEntities(ctx context.Context, req *blobbergrpc. } } - dirref, err := b.packageHandler.GetRefWithChildren(ctx, allocationID, fileref.Path) + dirref, err := b.packageHandler.GetRefWithChildren(ctx, alloc.ID, fileref.Path) if err != nil { return nil, common.NewError("invalid_parameters", "Invalid path. "+err.Error()) } - if clientID != allocationObj.OwnerID { + if clientID != alloc.OwnerID { dirref.Path = "" } var entities []*blobbergrpc.FileRef for _, entity := range dirref.Children { - if clientID != allocationObj.OwnerID { + if clientID != alloc.OwnerID { entity.Path = "" } entities = append(entities, reference.FileRefToFileRefGRPC(entity)) @@ -221,22 +222,21 @@ func (b *blobberGRPCService) ListEntities(ctx context.Context, req *blobbergrpc. 
refGRPC.DirMetaData.Children = entities return &blobbergrpc.ListEntitiesResponse{ - AllocationRoot: allocationObj.AllocationRoot, + AllocationRoot: alloc.AllocationRoot, MetaData: refGRPC, }, nil } func (b *blobberGRPCService) GetObjectPath(ctx context.Context, req *blobbergrpc.GetObjectPathRequest) (*blobbergrpc.GetObjectPathResponse, error) { allocationTx := req.Context.Allocation - allocationObj, err := b.storageHandler.verifyAllocation(ctx, allocationTx, false) + alloc, err := b.storageHandler.verifyAllocation(ctx, allocationTx, false) if err != nil { return nil, common.NewError("invalid_parameters", "Invalid allocation id passed."+err.Error()) } - allocationID := allocationObj.ID clientID := req.Context.Client - if len(clientID) == 0 || allocationObj.OwnerID != clientID { + if len(clientID) == 0 || alloc.OwnerID != clientID { return nil, common.NewError("invalid_operation", "Operation needs to be performed by the owner of the allocation") } path := req.Path @@ -254,16 +254,16 @@ func (b *blobberGRPCService) GetObjectPath(ctx context.Context, req *blobbergrpc return nil, common.NewError("invalid_parameters", "Invalid block number") } - objectPath, err := b.packageHandler.GetObjectPathGRPC(ctx, allocationID, blockNum) + objectPath, err := b.packageHandler.GetObjectPathGRPC(ctx, alloc.ID, blockNum) if err != nil { return nil, err } var latestWM *writemarker.WriteMarkerEntity - if len(allocationObj.AllocationRoot) == 0 { + if len(alloc.AllocationRoot) == 0 { latestWM = nil } else { - latestWM, err = b.packageHandler.GetWriteMarkerEntity(ctx, allocationObj.AllocationRoot) + latestWM, err = b.packageHandler.GetWriteMarkerEntity(ctx, alloc.AllocationRoot) if err != nil { return nil, common.NewError("latest_write_marker_read_error", "Error reading the latest write marker for allocation."+err.Error()) } @@ -281,12 +281,11 @@ func (b *blobberGRPCService) GetObjectPath(ctx context.Context, req *blobbergrpc func (b *blobberGRPCService) GetReferencePath(ctx 
context.Context, req *blobbergrpc.GetReferencePathRequest) (*blobbergrpc.GetReferencePathResponse, error) { allocationTx := req.Context.Allocation - allocationObj, err := b.storageHandler.verifyAllocation(ctx, allocationTx, false) + alloc, err := b.storageHandler.verifyAllocation(ctx, allocationTx, false) if err != nil { return nil, common.NewError("invalid_parameters", "Invalid allocation id passed."+err.Error()) } - allocationID := allocationObj.ID clientID := req.Context.Client if len(clientID) == 0 { @@ -308,7 +307,7 @@ func (b *blobberGRPCService) GetReferencePath(ctx context.Context, req *blobberg } } - rootRef, err := b.packageHandler.GetReferencePathFromPaths(ctx, allocationID, paths) + rootRef, err := b.packageHandler.GetReferencePathFromPaths(ctx, alloc.ID, paths) if err != nil { return nil, err } @@ -330,10 +329,10 @@ func (b *blobberGRPCService) GetReferencePath(ctx context.Context, req *blobberg } var latestWM *writemarker.WriteMarkerEntity - if len(allocationObj.AllocationRoot) == 0 { + if len(alloc.AllocationRoot) == 0 { latestWM = nil } else { - latestWM, err = writemarker.GetWriteMarkerEntity(ctx, allocationObj.AllocationRoot) + latestWM, err = writemarker.GetWriteMarkerEntity(ctx, alloc.AllocationRoot) if err != nil { return nil, common.NewError("latest_write_marker_read_error", "Error reading the latest write marker for allocation."+err.Error()) } @@ -349,15 +348,14 @@ func (b *blobberGRPCService) GetReferencePath(ctx context.Context, req *blobberg func (b *blobberGRPCService) GetObjectTree(ctx context.Context, req *blobbergrpc.GetObjectTreeRequest) (*blobbergrpc.GetObjectTreeResponse, error) { allocationTx := req.Context.Allocation - allocationObj, err := b.storageHandler.verifyAllocation(ctx, allocationTx, false) + alloc, err := b.storageHandler.verifyAllocation(ctx, allocationTx, false) if err != nil { return nil, common.NewError("invalid_parameters", "Invalid allocation id passed."+err.Error()) } - allocationID := allocationObj.ID clientID := 
req.Context.Client - if len(clientID) == 0 || allocationObj.OwnerID != clientID { + if len(clientID) == 0 || alloc.OwnerID != clientID { return nil, common.NewError("invalid_operation", "Operation needs to be performed by the owner of the allocation") } path := req.Path @@ -365,7 +363,7 @@ func (b *blobberGRPCService) GetObjectTree(ctx context.Context, req *blobbergrpc return nil, common.NewError("invalid_parameters", "Invalid path") } - rootRef, err := b.packageHandler.GetObjectTree(ctx, allocationID, path) + rootRef, err := b.packageHandler.GetObjectTree(ctx, alloc.ID, path) if err != nil { return nil, err } @@ -387,10 +385,10 @@ func (b *blobberGRPCService) GetObjectTree(ctx context.Context, req *blobbergrpc } var latestWM *writemarker.WriteMarkerEntity - if len(allocationObj.AllocationRoot) == 0 { + if len(alloc.AllocationRoot) == 0 { latestWM = nil } else { - latestWM, err = writemarker.GetWriteMarkerEntity(ctx, allocationObj.AllocationRoot) + latestWM, err = writemarker.GetWriteMarkerEntity(ctx, alloc.AllocationRoot) if err != nil { return nil, common.NewError("latest_write_marker_read_error", "Error reading the latest write marker for allocation."+err.Error()) } diff --git a/code/go/0chain.net/blobbercore/handler/helper.go b/code/go/0chain.net/blobbercore/handler/helper.go index b412aa718..4cced3dbb 100644 --- a/code/go/0chain.net/blobbercore/handler/helper.go +++ b/code/go/0chain.net/blobbercore/handler/helper.go @@ -16,12 +16,9 @@ import ( ) func setupGRPCHandlerContext(ctx context.Context, r *blobbergrpc.RequestContext) context.Context { - ctx = context.WithValue(ctx, constants.CLIENT_CONTEXT_KEY, - r.Client) - ctx = context.WithValue(ctx, constants.CLIENT_KEY_CONTEXT_KEY, - r.ClientKey) - ctx = context.WithValue(ctx, constants.ALLOCATION_CONTEXT_KEY, - r.Allocation) + ctx = context.WithValue(ctx, constants.CLIENT_CONTEXT_KEY, r.Client) + ctx = context.WithValue(ctx, constants.CLIENT_KEY_CONTEXT_KEY, r.ClientKey) + ctx = context.WithValue(ctx, 
constants.ALLOCATION_CONTEXT_KEY, r.Allocation) return ctx } diff --git a/code/go/0chain.net/blobbercore/handler/object_operation_handler.go b/code/go/0chain.net/blobbercore/handler/object_operation_handler.go index d7fb4eba1..8db11078e 100644 --- a/code/go/0chain.net/blobbercore/handler/object_operation_handler.go +++ b/code/go/0chain.net/blobbercore/handler/object_operation_handler.go @@ -184,38 +184,34 @@ func (fsh *StorageHandler) DownloadFile(ctx context.Context, r *http.Request) ( "invalid method used (GET), use POST instead") } + // get client and allocation ids var ( - allocationTx = ctx.Value(constants.ALLOCATION_CONTEXT_KEY).(string) clientID = ctx.Value(constants.CLIENT_CONTEXT_KEY).(string) - - allocationObj *allocation.Allocation + allocationTx = ctx.Value(constants.ALLOCATION_CONTEXT_KEY).(string) + _ = ctx.Value(constants.CLIENT_KEY_CONTEXT_KEY).(string) // runtime type check + alloc *allocation.Allocation ) + // check client if len(clientID) == 0 { return nil, common.NewError("download_file", "invalid client") } - // runtime type check - _ = ctx.Value(constants.CLIENT_KEY_CONTEXT_KEY).(string) - - // verify or update allocation - allocationObj, err = fsh.verifyAllocation(ctx, allocationTx, false) + // get and check allocation + alloc, err = fsh.verifyAllocation(ctx, allocationTx, false) if err != nil { return nil, common.NewErrorf("download_file", "invalid allocation id passed: %v", err) } - var allocationID = allocationObj.ID - + // get and parse file params if err = r.ParseMultipartForm(FORM_FILE_PARSE_MAX_MEMORY); nil != err { Logger.Info("download_file - request_parse_error", zap.Error(err)) return nil, common.NewErrorf("download_file", "request_parse_error: %v", err) } - rxPay := r.FormValue("rx_pay") == "true" - - pathHash, err := pathHashFromReq(r, allocationID) + pathHash, err := pathHashFromReq(r, alloc.ID) if err != nil { return nil, common.NewError("download_file", "invalid path") } @@ -243,6 +239,7 @@ func (fsh *StorageHandler) 
DownloadFile(ctx context.Context, r *http.Request) ( "invalid number of blocks") } + // get read marker var ( readMarkerString = r.FormValue("read_marker") readMarker = &readmarker.ReadMarker{} @@ -256,14 +253,14 @@ func (fsh *StorageHandler) DownloadFile(ctx context.Context, r *http.Request) ( var rmObj = &readmarker.ReadMarkerEntity{} rmObj.LatestRM = readMarker - if err = rmObj.VerifyMarker(ctx, allocationObj); err != nil { + if err = rmObj.VerifyMarker(ctx, alloc); err != nil { return nil, common.NewErrorf("download_file", "invalid read marker, "+ "failed to verify the read marker: %v", err) } + // get file reference var fileref *reference.Ref - fileref, err = reference.GetReferenceFromLookupHash(ctx, allocationID, - pathHash) + fileref, err = reference.GetReferenceFromLookupHash(ctx, alloc.ID, pathHash) if err != nil { return nil, common.NewErrorf("download_file", "invalid file path: %v", err) @@ -274,40 +271,37 @@ func (fsh *StorageHandler) DownloadFile(ctx context.Context, r *http.Request) ( "path is not a file: %v", err) } - var ( - authTokenString = r.FormValue("auth_token") - clientIDForReadRedeem = clientID // default payer is client - isACollaborator = reference.IsACollaborator(ctx, fileref.ID, clientID) - ) + // set payer: default + var payerID = alloc.OwnerID - // Owner will pay for collaborator - if isACollaborator { - clientIDForReadRedeem = allocationObj.OwnerID + // set payer: check for explicit allocation payer value + if len(alloc.PayerID) > 0 { + payerID = alloc.PayerID } - var attrs *reference.Attributes - if attrs, err = fileref.GetAttributes(); err != nil { - return nil, common.NewErrorf("download_file", - "error getting file attributes: %v", err) + // set payer: check for command line payer flag (--rx_pay) + if r.FormValue("rx_pay") == "true" { + payerID = clientID } - var authToken *readmarker.AuthTicket = nil + // authorize file access + var ( + isOwner = clientID == alloc.OwnerID + isRepairer = clientID == alloc.RepairerID + 
isCollaborator = reference.IsACollaborator(ctx, fileref.ID, clientID) + ) - if (allocationObj.OwnerID != clientID && - allocationObj.PayerID != clientID && - !isACollaborator) || len(authTokenString) > 0 { + var authToken *readmarker.AuthTicket = nil - var authTicketVerified bool - authTicketVerified, err = fsh.verifyAuthTicket(ctx, r.FormValue("auth_token"), allocationObj, - fileref, clientID) - if err != nil { - return nil, common.NewErrorf("download_file", - "verifying auth ticket: %v", err) - } + if (!isOwner && !isRepairer && !isCollaborator) || len(r.FormValue("auth_token")) > 0 { + var authTokenString = r.FormValue("auth_token") - if !authTicketVerified { + // check auth token + if isAuthorized, err := fsh.verifyAuthTicket(ctx, + authTokenString, alloc, fileref, clientID, + ); !isAuthorized { return nil, common.NewErrorf("download_file", - "could not verify the auth ticket") + "cannot verify auth ticket: %v", err) } authToken = &readmarker.AuthTicket{} @@ -335,22 +329,26 @@ func (fsh *StorageHandler) DownloadFile(ctx context.Context, r *http.Request) ( return nil, errors.New("auth ticket is not authorized to download file specified") } } + readMarker.AuthTicket = datatypes.JSON(authTokenString) - // if --rx_pay used 3rd_party pays - if rxPay { - clientIDForReadRedeem = clientID - } else if attrs.WhoPaysForReads == common.WhoPaysOwner { - clientIDForReadRedeem = allocationObj.OwnerID // owner pays + // check for file payer flag + if fileAttrs, err := fileref.GetAttributes(); err != nil { + return nil, common.NewErrorf("download_file", + "error getting file attributes: %v", err) + } else { + if fileAttrs.WhoPaysForReads == common.WhoPays3rdParty { + payerID = clientID + } } - - readMarker.AuthTicket = datatypes.JSON(authTokenString) } + // create read marker var ( rme *readmarker.ReadMarkerEntity latestRM *readmarker.ReadMarker pendNumBlocks int64 ) + rme, err = readmarker.GetLatestReadMarkerEntity(ctx, clientID) if err != nil && !errors.Is(err, 
gorm.ErrRecordNotFound) { return nil, common.NewErrorf("download_file", @@ -378,15 +376,13 @@ func (fsh *StorageHandler) DownloadFile(ctx context.Context, r *http.Request) ( } // check out read pool tokens if read_price > 0 - err = readPreRedeem(ctx, allocationObj, numBlocks, pendNumBlocks, - clientIDForReadRedeem) + err = readPreRedeem(ctx, alloc, numBlocks, pendNumBlocks, payerID) if err != nil { return nil, common.NewErrorf("download_file", "pre-redeeming read marker: %v", err) } - // reading allowed - + // reading is allowed var ( downloadMode = r.FormValue("content") respData []byte @@ -397,7 +393,7 @@ func (fsh *StorageHandler) DownloadFile(ctx context.Context, r *http.Request) ( fileData.Path = fileref.Path fileData.Hash = fileref.ThumbnailHash fileData.OnCloud = fileref.OnCloud - respData, err = filestore.GetFileStore().GetFileBlock(allocationID, + respData, err = filestore.GetFileStore().GetFileBlock(alloc.ID, fileData, blockNum, numBlocks) if err != nil { return nil, common.NewErrorf("download_file", @@ -409,7 +405,7 @@ func (fsh *StorageHandler) DownloadFile(ctx context.Context, r *http.Request) ( fileData.Path = fileref.Path fileData.Hash = fileref.ContentHash fileData.OnCloud = fileref.OnCloud - respData, err = filestore.GetFileStore().GetFileBlock(allocationID, + respData, err = filestore.GetFileStore().GetFileBlock(alloc.ID, fileData, blockNum, numBlocks) if err != nil { return nil, common.NewErrorf("download_file", @@ -417,7 +413,7 @@ func (fsh *StorageHandler) DownloadFile(ctx context.Context, r *http.Request) ( } } - readMarker.PayerID = clientIDForReadRedeem + readMarker.PayerID = payerID err = readmarker.SaveLatestReadMarker(ctx, readMarker, latestRM == nil) if err != nil { return nil, common.NewErrorf("download_file", @@ -511,6 +507,10 @@ func (fsh *StorageHandler) CommitWrite(ctx context.Context, r *http.Request) (*C return nil, common.NewError("invalid_parameters", "Invalid allocation id passed."+err.Error()) } + if allocationObj.IsImmutable 
{ + return nil, common.NewError("immutable_allocation", "Cannot write to an immutable allocation") + } + allocationID := allocationObj.ID connectionID := r.FormValue("connection_id") @@ -532,7 +532,7 @@ func (fsh *StorageHandler) CommitWrite(ctx context.Context, r *http.Request) (*C "Invalid connection id. Connection does not have any changes.") } - var isACollaborator bool + var isCollaborator bool for _, change := range connectionObj.Changes { if change.Operation == allocation.UPDATE_OPERATION { updateFileChange := new(allocation.UpdateFileChange) @@ -543,7 +543,7 @@ func (fsh *StorageHandler) CommitWrite(ctx context.Context, r *http.Request) (*C if err != nil { return nil, err } - isACollaborator = reference.IsACollaborator(ctx, fileRef.ID, clientID) + isCollaborator = reference.IsACollaborator(ctx, fileRef.ID, clientID) break } } @@ -552,7 +552,7 @@ func (fsh *StorageHandler) CommitWrite(ctx context.Context, r *http.Request) (*C return nil, common.NewError("invalid_params", "Please provide clientID and clientKey") } - if (allocationObj.OwnerID != clientID || encryption.Hash(clientKeyBytes) != clientID) && !isACollaborator { + if (allocationObj.OwnerID != clientID || encryption.Hash(clientKeyBytes) != clientID) && !isCollaborator { return nil, common.NewError("invalid_operation", "Operation needs to be performed by the owner of the allocation") } @@ -603,7 +603,7 @@ func (fsh *StorageHandler) CommitWrite(ctx context.Context, r *http.Request) (*C } var clientIDForWriteRedeem = writeMarker.ClientID - if isACollaborator { + if isCollaborator { clientIDForWriteRedeem = allocationObj.OwnerID } @@ -677,6 +677,11 @@ func (fsh *StorageHandler) RenameObject(ctx context.Context, r *http.Request) (i if err != nil { return nil, common.NewError("invalid_parameters", "Invalid allocation id passed."+err.Error()) } + + if allocationObj.IsImmutable { + return nil, common.NewError("immutable_allocation", "Cannot rename data in an immutable allocation") + } + allocationID := 
allocationObj.ID clientID := ctx.Value(constants.CLIENT_CONTEXT_KEY).(string) @@ -775,6 +780,10 @@ func (fsh *StorageHandler) UpdateObjectAttributes(ctx context.Context, return nil, common.NewError("invalid_signature", "Invalid signature") } + if alloc.IsImmutable { + return nil, common.NewError("immutable_allocation", "Cannot update data in an immutable allocation") + } + // runtime type check _ = ctx.Value(constants.CLIENT_KEY_CONTEXT_KEY).(string) @@ -871,6 +880,10 @@ func (fsh *StorageHandler) CopyObject(ctx context.Context, r *http.Request) (int return nil, common.NewError("invalid_signature", "Invalid signature") } + if allocationObj.IsImmutable { + return nil, common.NewError("immutable_allocation", "Cannot copy data in an immutable allocation") + } + clientID := ctx.Value(constants.CLIENT_CONTEXT_KEY).(string) _ = ctx.Value(constants.CLIENT_KEY_CONTEXT_KEY).(string) @@ -1003,6 +1016,10 @@ func (fsh *StorageHandler) WriteFile(ctx context.Context, r *http.Request) (*Upl return nil, common.NewError("invalid_signature", "Invalid signature") } + if allocationObj.IsImmutable { + return nil, common.NewError("immutable_allocation", "Cannot write to an immutable allocation") + } + allocationID := allocationObj.ID if len(clientID) == 0 { @@ -1037,7 +1054,7 @@ func (fsh *StorageHandler) WriteFile(ctx context.Context, r *http.Request) (*Upl } if mode == allocation.DELETE_OPERATION { - if allocationObj.OwnerID != clientID && allocationObj.PayerID != clientID { + if allocationObj.OwnerID != clientID && allocationObj.RepairerID != clientID { return nil, common.NewError("invalid_operation", "Operation needs to be performed by the owner or the payer of the allocation") } result, err = fsh.DeleteFile(ctx, r, connectionObj) @@ -1060,7 +1077,7 @@ func (fsh *StorageHandler) WriteFile(ctx context.Context, r *http.Request) (*Upl existingFileRefSize := int64(0) exisitingFileOnCloud := false if mode == allocation.INSERT_OPERATION { - if allocationObj.OwnerID != clientID && 
allocationObj.PayerID != clientID { + if allocationObj.OwnerID != clientID && allocationObj.RepairerID != clientID { return nil, common.NewError("invalid_operation", "Operation needs to be performed by the owner or the payer of the allocation") } @@ -1073,7 +1090,7 @@ func (fsh *StorageHandler) WriteFile(ctx context.Context, r *http.Request) (*Upl } if allocationObj.OwnerID != clientID && - allocationObj.PayerID != clientID && + allocationObj.RepairerID != clientID && !reference.IsACollaborator(ctx, exisitingFileRef.ID, clientID) { return nil, common.NewError("invalid_operation", "Operation needs to be performed by the owner, collaborator or the payer of the allocation") } diff --git a/code/go/0chain.net/blobbercore/handler/protocol.go b/code/go/0chain.net/blobbercore/handler/protocol.go index 9a4bfa9e8..a490de673 100644 --- a/code/go/0chain.net/blobbercore/handler/protocol.go +++ b/code/go/0chain.net/blobbercore/handler/protocol.go @@ -1,19 +1,20 @@ package handler import ( - "context" - "encoding/json" - "errors" "sync" "time" + "errors" + "context" + "encoding/json" + "go.uber.org/zap" + "github.com/0chain/gosdk/zcncore" "0chain.net/blobbercore/config" - . "0chain.net/core/logging" + "0chain.net/core/chain" "0chain.net/core/node" "0chain.net/core/transaction" - - "github.com/0chain/gosdk/zcncore" - "go.uber.org/zap" + "0chain.net/core/util" + . 
"0chain.net/core/logging" ) const ( @@ -56,21 +57,50 @@ func (ar *apiResp) err() error { //nolint:unused,deadcode // might be used later return nil } -func RegisterBlobber(ctx context.Context) (string, error) { - wcb := &WalletCallback{} - wcb.wg = &sync.WaitGroup{} - wcb.wg.Add(1) - err := zcncore.RegisterToMiners(node.Self.GetWallet(), wcb) - if err != nil { - return "", err +func getStorageNode() (*transaction.StorageNode, error) { + var err error + sn := &transaction.StorageNode{} + sn.ID = node.Self.ID + sn.BaseURL = node.Self.GetURLBase() + sn.Geolocation = transaction.StorageNodeGeolocation(config.Geolocation()) + sn.Capacity = config.Configuration.Capacity + readPrice := config.Configuration.ReadPrice + writePrice := config.Configuration.WritePrice + if config.Configuration.PriceInUSD { + readPrice, err = zcncore.ConvertUSDToToken(readPrice) + if err != nil { + return nil, err + } + + writePrice, err = zcncore.ConvertUSDToToken(writePrice) + if err != nil { + return nil, err + } } + sn.Terms.ReadPrice = zcncore.ConvertToValue(readPrice) + sn.Terms.WritePrice = zcncore.ConvertToValue(writePrice) + sn.Terms.MinLockDemand = config.Configuration.MinLockDemand + sn.Terms.MaxOfferDuration = config.Configuration.MaxOfferDuration + sn.Terms.ChallengeCompletionTime = config.Configuration.ChallengeCompletionTime + sn.StakePoolSettings.DelegateWallet = config.Configuration.DelegateWallet + sn.StakePoolSettings.MinStake = config.Configuration.MinStake + sn.StakePoolSettings.MaxStake = config.Configuration.MaxStake + sn.StakePoolSettings.NumDelegates = config.Configuration.NumDelegates + sn.StakePoolSettings.ServiceCharge = config.Configuration.ServiceCharge + return sn, nil +} + +// Add or update blobber on blockchain +func BlobberAdd(ctx context.Context) (string, error) { time.Sleep(transaction.SLEEP_FOR_TXN_CONFIRMATION * time.Second) + // initialize storage node (ie blobber) txn, err := transaction.NewTransactionEntity() if err != nil { return "", err } + sn, err := 
getStorageNode() if err != nil { return "", err @@ -80,11 +110,13 @@ func RegisterBlobber(ctx context.Context) (string, error) { if err != nil { return "", err } - Logger.Info("Adding blobber to the blockchain.") + + Logger.Info("Adding or updating on the blockchain") + err = txn.ExecuteSmartContract(transaction.STORAGE_CONTRACT_ADDRESS, transaction.ADD_BLOBBER_SC_NAME, string(snBytes), 0) if err != nil { - Logger.Info("Failed during registering blobber to the mining network", + Logger.Info("Failed to set blobber on the blockchain", zap.String("err:", err.Error())) return "", err } @@ -102,15 +134,16 @@ func BlobberHealthCheck(ctx context.Context) (string, error) { if config.Configuration.Capacity == 0 { return "", ErrBlobberHasRemoved } + txn, err := transaction.NewTransactionEntity() if err != nil { return "", err } - Logger.Info("Blobber health check to the blockchain.") + err = txn.ExecuteSmartContract(transaction.STORAGE_CONTRACT_ADDRESS, transaction.BLOBBER_HEALTH_CHECK, "", 0) if err != nil { - Logger.Info("Failed during blobber health check to the mining network", + Logger.Info("Failed to health check on the blockchain", zap.String("err:", err.Error())) return "", err } @@ -118,63 +151,26 @@ func BlobberHealthCheck(ctx context.Context) (string, error) { return txn.Hash, nil } -func UpdateBlobberSettings(ctx context.Context) (string, error) { - txn, err := transaction.NewTransactionEntity() - if err != nil { - return "", err - } - - sn, err := getStorageNode() - if err != nil { - return "", err - } - - snBytes, err := json.Marshal(sn) - if err != nil { - return "", err - } +func TransactionVerify(txnHash string) (t *transaction.Transaction, err error) { + time.Sleep(transaction.SLEEP_FOR_TXN_CONFIRMATION * time.Second) - Logger.Info("Updating settings to the blockchain.") - err = txn.ExecuteSmartContract(transaction.STORAGE_CONTRACT_ADDRESS, - transaction.UPDATE_BLOBBER_SETTINGS, string(snBytes), 0) - if err != nil { - Logger.Info("Failed during updating 
settings to the mining network", - zap.String("err:", err.Error())) - return "", err + for i := 0; i < util.MAX_RETRIES; i++ { + time.Sleep(transaction.SLEEP_FOR_TXN_CONFIRMATION * time.Second) + if t, err = transaction.VerifyTransaction(txnHash, chain.GetServerChain()); err == nil { + return t, nil + } } - return txn.Hash, nil + return } -func getStorageNode() (*transaction.StorageNode, error) { - var err error - sn := &transaction.StorageNode{} - sn.ID = node.Self.ID - sn.BaseURL = node.Self.GetURLBase() - sn.Capacity = config.Configuration.Capacity - readPrice := config.Configuration.ReadPrice - writePrice := config.Configuration.WritePrice - if config.Configuration.PriceInUSD { - readPrice, err = zcncore.ConvertUSDToToken(readPrice) - if err != nil { - return nil, err - } - - writePrice, err = zcncore.ConvertUSDToToken(writePrice) - if err != nil { - return nil, err - } +func WalletRegister() error { + wcb := &WalletCallback{} + wcb.wg = &sync.WaitGroup{} + wcb.wg.Add(1) + if err := zcncore.RegisterToMiners(node.Self.GetWallet(), wcb); err != nil { + return err } - sn.Terms.ReadPrice = zcncore.ConvertToValue(readPrice) - sn.Terms.WritePrice = zcncore.ConvertToValue(writePrice) - sn.Terms.MinLockDemand = config.Configuration.MinLockDemand - sn.Terms.MaxOfferDuration = config.Configuration.MaxOfferDuration - sn.Terms.ChallengeCompletionTime = config.Configuration.ChallengeCompletionTime - sn.StakePoolSettings.DelegateWallet = config.Configuration.DelegateWallet - sn.StakePoolSettings.MinStake = config.Configuration.MinStake - sn.StakePoolSettings.MaxStake = config.Configuration.MaxStake - sn.StakePoolSettings.NumDelegates = config.Configuration.NumDelegates - sn.StakePoolSettings.ServiceCharge = config.Configuration.ServiceCharge - return sn, nil + return nil } diff --git a/code/go/0chain.net/blobbercore/handler/storage_handler.go b/code/go/0chain.net/blobbercore/handler/storage_handler.go index a8fdc366b..a6dbb6daa 100644 --- 
a/code/go/0chain.net/blobbercore/handler/storage_handler.go +++ b/code/go/0chain.net/blobbercore/handler/storage_handler.go @@ -123,12 +123,12 @@ func (fsh *StorageHandler) GetFileMeta(ctx context.Context, r *http.Request) (in return nil, common.NewError("invalid_method", "Invalid method used. Use POST instead") } allocationTx := ctx.Value(constants.ALLOCATION_CONTEXT_KEY).(string) - allocationObj, err := fsh.verifyAllocation(ctx, allocationTx, true) + alloc, err := fsh.verifyAllocation(ctx, allocationTx, true) if err != nil { return nil, common.NewError("invalid_parameters", "Invalid allocation id passed."+err.Error()) } - allocationID := allocationObj.ID + allocationID := alloc.ID clientID := ctx.Value(constants.CLIENT_CONTEXT_KEY).(string) if len(clientID) == 0 { @@ -164,18 +164,24 @@ func (fsh *StorageHandler) GetFileMeta(ctx context.Context, r *http.Request) (in result["collaborators"] = collaborators - authTokenString := r.FormValue("auth_token") + // authorize file access + var ( + isOwner = clientID == alloc.OwnerID + isRepairer = clientID == alloc.RepairerID + isCollaborator = reference.IsACollaborator(ctx, fileref.ID, clientID) + ) - if (allocationObj.OwnerID != clientID && - allocationObj.PayerID != clientID && - !reference.IsACollaborator(ctx, fileref.ID, clientID)) || len(authTokenString) > 0 { - authTicketVerified, err := fsh.verifyAuthTicket(ctx, r.FormValue("auth_token"), allocationObj, fileref, clientID) - if err != nil { - return nil, err - } - if !authTicketVerified { - return nil, common.NewError("auth_ticket_verification_failed", "Could not verify the auth ticket.") + if !isOwner && !isRepairer && !isCollaborator { + var authTokenString = r.FormValue("auth_token") + + // check auth token + if isAuthorized, err := fsh.verifyAuthTicket(ctx, + authTokenString, alloc, fileref, clientID, + ); !isAuthorized { + return nil, common.NewErrorf("download_file", + "cannot verify auth ticket: %v", err) } + delete(result, "path") } @@ -696,7 +702,7 @@ func 
(fsh *StorageHandler) CalculateHash(ctx context.Context, r *http.Request) ( // verifySignatureFromRequest verifyes signature passed as common.ClientSignatureHeader header. func verifySignatureFromRequest(r *http.Request, pbK string) (bool, error) { - sign := r.Header.Get(common.ClientSignatureHeader) + sign := encryption.MiraclToHerumiSig(r.Header.Get(common.ClientSignatureHeader)) if len(sign) < 64 { return false, nil } @@ -707,7 +713,9 @@ func verifySignatureFromRequest(r *http.Request, pbK string) (bool, error) { return false, common.NewError("invalid_params", "Missing allocation tx") } - return encryption.Verify(pbK, sign, encryption.Hash(data)) + hash := encryption.Hash(data) + pbK = encryption.MiraclToHerumiPK(pbK) + return encryption.Verify(pbK, sign, hash) } // pathsFromReq retrieves paths value from request which can be represented as single "path" value or "paths" values, diff --git a/code/go/0chain.net/blobbercore/handler/zcncore.go b/code/go/0chain.net/blobbercore/handler/zcncore.go index 2966ee921..175889f82 100644 --- a/code/go/0chain.net/blobbercore/handler/zcncore.go +++ b/code/go/0chain.net/blobbercore/handler/zcncore.go @@ -2,6 +2,7 @@ package handler import ( "sync" + "encoding/json" "github.com/0chain/gosdk/core/common" "github.com/0chain/gosdk/zcncore" @@ -11,6 +12,7 @@ type ZCNStatus struct { wg *sync.WaitGroup success bool balance int64 + info string } func (zcn *ZCNStatus) OnBalanceAvailable(status int, value int64, info string) { @@ -23,6 +25,16 @@ func (zcn *ZCNStatus) OnBalanceAvailable(status int, value int64, info string) { zcn.balance = value } +func (zcn *ZCNStatus) OnInfoAvailable(op int, status int, info string, err string) { + defer zcn.wg.Done() + if status == zcncore.StatusSuccess { + zcn.success = true + } else { + zcn.success = false + } + zcn.info = info +} + func (zcn *ZCNStatus) OnTransactionComplete(t *zcncore.Transaction, status int) { defer zcn.wg.Done() if status == zcncore.StatusSuccess { @@ -49,7 +61,7 @@ func 
CheckBalance() (float64, error) { wg.Add(1) err := zcncore.GetBalance(statusBar) if err != nil { - return 0, common.NewError("check_balance_failed", "Call to GetBalance failed with err: "+err.Error()) + return 0, common.NewError("check_balance_failed", "Call to GetBalance failed with err: " + err.Error()) } wg.Wait() if !statusBar.success { @@ -58,6 +70,32 @@ func CheckBalance() (float64, error) { return zcncore.ConvertToToken(statusBar.balance), nil } +func GetBlobbers() ([]*zcncore.Blobber, error) { + var info struct { + Nodes []*zcncore.Blobber + } + + wg := &sync.WaitGroup{} + statusBar := &ZCNStatus{wg: wg} + wg.Add(1) + + err := zcncore.GetBlobbers(statusBar) + if err != nil { + return info.Nodes, common.NewError("get_blobbers_failed", "Call to GetBlobbers failed with err: " + err.Error()) + } + wg.Wait() + + if !statusBar.success { + return info.Nodes, nil + } + + if err = json.Unmarshal([]byte(statusBar.info), &info); err != nil { + return info.Nodes, common.NewError("get_blobbers_failed", "Decoding response to GetBlobbers failed with err: " + err.Error()) + } + + return info.Nodes, nil +} + func CallFaucet() error { wg := &sync.WaitGroup{} statusBar := &ZCNStatus{wg: wg} diff --git a/code/go/0chain.net/blobbercore/openapi/blobber.swagger.json b/code/go/0chain.net/blobbercore/openapi/blobber.swagger.json index 327425609..05beb9483 100644 --- a/code/go/0chain.net/blobbercore/openapi/blobber.swagger.json +++ b/code/go/0chain.net/blobbercore/openapi/blobber.swagger.json @@ -485,6 +485,12 @@ "OwnerPublicKey": { "type": "string" }, + "RepairerID": { + "type": "string" + }, + "PayerID": { + "type": "string" + }, "Expiration": { "type": "string", "format": "int64" @@ -521,9 +527,6 @@ "items": { "$ref": "#/definitions/v1Term" } - }, - "PayerID": { - "type": "string" } } }, diff --git a/code/go/0chain.net/blobbercore/readmarker/entity.go b/code/go/0chain.net/blobbercore/readmarker/entity.go index d01030834..a486ade2e 100644 --- 
a/code/go/0chain.net/blobbercore/readmarker/entity.go +++ b/code/go/0chain.net/blobbercore/readmarker/entity.go @@ -108,7 +108,6 @@ func GetLatestReadMarkerEntity(ctx context.Context, clientID string) (*ReadMarke } func SaveLatestReadMarker(ctx context.Context, rm *ReadMarker, isCreate bool) error { - var ( db = datastore.GetStore().GetTransaction(ctx) rmEntity = &ReadMarkerEntity{} diff --git a/code/go/0chain.net/blobbercore/stats/blobberstats.go b/code/go/0chain.net/blobbercore/stats/blobberstats.go index 080088a1b..5d4f1c111 100644 --- a/code/go/0chain.net/blobbercore/stats/blobberstats.go +++ b/code/go/0chain.net/blobbercore/stats/blobberstats.go @@ -42,7 +42,7 @@ type WriteMarkersStat struct { } type Stats struct { - TotalSize int64 `json:"total_size"` // the total allocated size + AllocatedSize int64 `json:"allocated_size"` UsedSize int64 `json:"used_size"` FilesSize int64 `json:"files_size"` ThumbnailsSize int64 `json:"thumbnails_size"` @@ -145,7 +145,7 @@ func (bs *BlobberStats) loadDetailedStats(ctx context.Context) { } given[as.AllocationID] = struct{}{} - bs.TotalSize += as.TotalSize + bs.AllocatedSize += as.AllocatedSize as.ReadMarkers, err = loadAllocReadMarkersStat(ctx, as.AllocationID) if err != nil { @@ -252,7 +252,7 @@ func (bs *BlobberStats) loadAllocationStats(ctx context.Context) { SUM(file_stats.num_of_block_downloads) as num_of_reads, SUM(reference_objects.num_of_blocks) as num_of_block_writes, COUNT(*) as num_of_writes, - allocations.size AS total_size, + allocations.size AS allocated_size, allocations.expiration_date AS expiration_date`). Joins(`INNER JOIN file_stats ON reference_objects.id = file_stats.ref_id`). @@ -262,6 +262,7 @@ func (bs *BlobberStats) loadAllocationStats(ctx context.Context) { Where(`reference_objects.type = 'f' AND reference_objects.deleted_at IS NULL`). Group(`reference_objects.allocation_id, allocations.expiration_date`). + Group(`reference_objects.allocation_id, allocations.size`). 
Rows() if err != nil { @@ -272,8 +273,8 @@ func (bs *BlobberStats) loadAllocationStats(ctx context.Context) { for rows.Next() { var as = &AllocationStats{} - err = rows.Scan(&as.AllocationID, &as.TotalSize, &as.FilesSize, &as.ThumbnailsSize, - &as.NumReads, &as.BlockWrites, &as.NumWrites, &as.Expiration) + err = rows.Scan(&as.AllocationID, &as.FilesSize, &as.ThumbnailsSize, + &as.NumReads, &as.BlockWrites, &as.NumWrites, &as.AllocatedSize, &as.Expiration) if err != nil { Logger.Error("Error in scanning record for blobber stats", zap.Error(err)) diff --git a/code/go/0chain.net/blobbercore/stats/handler.go b/code/go/0chain.net/blobbercore/stats/handler.go index aacd60049..faea3cdaa 100644 --- a/code/go/0chain.net/blobbercore/stats/handler.go +++ b/code/go/0chain.net/blobbercore/stats/handler.go @@ -65,7 +65,7 @@ const tpl = ` Allocated size (bytes) - {{ .TotalSize }} + {{ .AllocatedSize }} Used Size (bytes) diff --git a/code/go/0chain.net/core/common/handler.go b/code/go/0chain.net/core/common/handler.go index 71b933456..f84af02e6 100644 --- a/code/go/0chain.net/core/common/handler.go +++ b/code/go/0chain.net/core/common/handler.go @@ -39,6 +39,7 @@ type JSONReqResponderF func(ctx context.Context, json map[string]interface{}) (i /*Respond - respond either data or error as a response */ func Respond(w http.ResponseWriter, data interface{}, err error) { + w.Header().Set("Access-Control-Allow-Origin", "*") // CORS for all. w.Header().Set("Content-Type", "application/json") if err != nil { data := make(map[string]interface{}, 2) @@ -105,6 +106,7 @@ func SetupCORSResponse(w http.ResponseWriter, r *http.Request) { */ func ToJSONResponse(handler JSONResponderF) ReqRespHandlerf { return func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Access-Control-Allow-Origin", "*") // CORS for all. 
if r.Method == "OPTIONS" { SetupCORSResponse(w, r) return diff --git a/code/go/0chain.net/core/encryption/keys.go b/code/go/0chain.net/core/encryption/keys.go index bda80893a..7000c2560 100644 --- a/code/go/0chain.net/core/encryption/keys.go +++ b/code/go/0chain.net/core/encryption/keys.go @@ -3,11 +3,14 @@ package encryption import ( "bufio" "io" + "strings" "0chain.net/core/common" "0chain.net/core/config" + . "0chain.net/core/logging" "github.com/0chain/gosdk/core/zcncrypto" + "github.com/herumi/bls-go-binary/bls" ) /*ReadKeys - reads a publicKey and a privateKey from a Reader. @@ -28,6 +31,8 @@ func ReadKeys(reader io.Reader) (publicKey string, privateKey string, publicIp s //Verify - given a public key and a signature and the hash used to create the signature, verify the signature func Verify(publicKey string, signature string, hash string) (bool, error) { + publicKey = MiraclToHerumiPK(publicKey) + signature = MiraclToHerumiSig(signature) signScheme := zcncrypto.NewSignatureScheme(config.Configuration.SignatureScheme) if signScheme != nil { err := signScheme.SetPublicKey(publicKey) @@ -38,3 +43,57 @@ func Verify(publicKey string, signature string, hash string) (bool, error) { } return false, common.NewError("invalid_signature_scheme", "Invalid signature scheme. Please check configuration") } + +// If input is normal herumi/bls public key, it returns it immmediately. +// So this is completely backward compatible with herumi/bls. +// If input is MIRACL public key, convert it to herumi/bls public key. +// +// This is an example of the raw public key we expect from MIRACL +var miraclExamplePK = `0418a02c6bd223ae0dfda1d2f9a3c81726ab436ce5e9d17c531ff0a385a13a0b491bdfed3a85690775ee35c61678957aaba7b1a1899438829f1dc94248d87ed36817f6dfafec19bfa87bf791a4d694f43fec227ae6f5a867490e30328cac05eaff039ac7dfc3364e851ebd2631ea6f1685609fc66d50223cc696cb59ff2fee47ac` +// +// This is an example of the same MIRACL public key serialized with ToString(). 
+// pk ([1bdfed3a85690775ee35c61678957aaba7b1a1899438829f1dc94248d87ed368,18a02c6bd223ae0dfda1d2f9a3c81726ab436ce5e9d17c531ff0a385a13a0b49],[039ac7dfc3364e851ebd2631ea6f1685609fc66d50223cc696cb59ff2fee47ac,17f6dfafec19bfa87bf791a4d694f43fec227ae6f5a867490e30328cac05eaff]) +func MiraclToHerumiPK(pk string) string { + if len(pk) != len(miraclExamplePK) { + // If input is normal herumi/bls public key, it returns it immmediately. + return pk + } + n1 := pk[2:66] + n2 := pk[66:(66+64)] + n3 := pk[(66+64):(66+64+64)] + n4 := pk[(66+64+64):(66+64+64+64)] + var p bls.PublicKey + err := p.SetHexString("1 " + n2 + " " + n1 + " " + n4 + " " + n3) + if err != nil { + Logger.Error("MiraclToHerumiPK: " + err.Error()) + } + return p.SerializeToHexStr() +} + +// Converts signature 'sig' to format that the herumi/bls library likes. +// zwallets are using MIRACL library which send a MIRACL signature not herumi +// lib. +// +// If the 'sig' was not in MIRACL format, we just return the original sig. +const miraclExampleSig = `(0d4dbad6d2586d5e01b6b7fbad77e4adfa81212c52b4a0b885e19c58e0944764,110061aa16d5ba36eef0ad4503be346908d3513c0a2aedfd0d2923411b420eca)` +func MiraclToHerumiSig(sig string) string { + if len(sig) <= 2 { + return sig + } + if sig[0] != miraclExampleSig[0] { + return sig + } + withoutParens := sig[1: (len(sig)-1) ] + comma := strings.Index(withoutParens, ",") + if comma < 0 { + return "00" + } + n1 := withoutParens[0:comma] + n2 := withoutParens[(comma+1):] + var sign bls.Sign + err := sign.SetHexString("1 " + n1 + " " + n2) + if err != nil { + Logger.Error("MiraclToHerumiSig: " + err.Error()) + } + return sign.SerializeToHexStr() +} diff --git a/code/go/0chain.net/core/encryption/keys_test.go b/code/go/0chain.net/core/encryption/keys_test.go index c8ee3d092..592dbe4e8 100644 --- a/code/go/0chain.net/core/encryption/keys_test.go +++ b/code/go/0chain.net/core/encryption/keys_test.go @@ -1,8 +1,10 @@ package encryption import ( + "encoding/hex" 
"github.com/0chain/gosdk/zboxcore/client" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "testing" ) @@ -20,4 +22,58 @@ func TestSignatureVerify(t *testing.T) { ) assert.Nil(t, err) assert.Equal(t, res, true) +} + +func TestMiraclToHerumiPK(t *testing.T) { + miraclpk1 := `0418a02c6bd223ae0dfda1d2f9a3c81726ab436ce5e9d17c531ff0a385a13a0b491bdfed3a85690775ee35c61678957aaba7b1a1899438829f1dc94248d87ed36817f6dfafec19bfa87bf791a4d694f43fec227ae6f5a867490e30328cac05eaff039ac7dfc3364e851ebd2631ea6f1685609fc66d50223cc696cb59ff2fee47ac` + pk1 := MiraclToHerumiPK(miraclpk1) + + require.EqualValues(t, pk1, "68d37ed84842c91d9f82389489a1b1a7ab7a957816c635ee750769853aeddf1b490b3aa185a3f01f537cd1e9e56c43ab2617c8a3f9d2a1fd0dae23d26b2ca018") + + // Assert DeserializeHexStr works on the output of MiraclToHerumiPK + var pk bls.PublicKey + err := pk.DeserializeHexStr(pk1) + require.NoError(t, err) +} + +func TestMiraclToHerumiSig(t *testing.T) { + miraclsig1 := `(0d4dbad6d2586d5e01b6b7fbad77e4adfa81212c52b4a0b885e19c58e0944764,110061aa16d5ba36eef0ad4503be346908d3513c0a2aedfd0d2923411b420eca)` + sig1 := MiraclToHerumiSig(miraclsig1) + + require.EqualValues(t, sig1, "644794e0589ce185b8a0b4522c2181faade477adfbb7b6015e6d58d2d6ba4d0d") + + // Assert DeserializeHexStr works on the output of MiraclToHerumiSig + var sig bls.Sign + err := sig.DeserializeHexStr(sig1) + require.NoError(t, err) + + // Test that passing in normal herumi sig just gets back the original. + sig2 := MiraclToHerumiSig(sig1) + if sig1 != sig2 { + panic("Signatures should be the same.") + } +} + +// Helper code to print out expected values of Hash and conversion functions. 
+func TestDebugOnly(t *testing.T) { + + // clientKey := "536d2ecfe5aab6c343e8c2e7ee9daa60c43eecc53f4b1c07a6cb2648d9e66c14f2e3fcd43875be40722992f56570fe3c751caacbc7d859b309c787f654bd5a97" + // // => 5c2fdfa03fc013cff0e4b716f0529b914e18fd2bc6cdfed49df13b6e3dc4684d + + clientKey := "0416c528570ce46eb83584cd604a9ed62644ef4f71a86587d57e4ab91953ff4699107374870799ad4550c4f3833cca2a4d5de75436d67caf89097f1e7d6d7de6d424cb5a08b9dca8957ea7c81a23d066b93a27500954cd29733149ec1f8a8abd540d08f9f81bb24b83ff27e24f173e639573e10a22ed7b0ca326a1aa9dc03e1eef" + // => bd3adcacc78ed4352931b138729986a07d2bf0e0a3bf2c885b37a9a0e649dd87 + // Looking for bd3adcacc78ed4352931b138729986a07d2bf0e0a3bf2c885b37a9a0e649dd87 + + clientKeyBytes, _ := hex.DecodeString(clientKey) + h := Hash(clientKeyBytes) + + fmt.Println("hash ", h) + + herumipk := MiraclToHerumiPK(clientKey) + fmt.Println("herumipk ", herumipk) + clientKeyBytes2, _ := hex.DecodeString(herumipk) + h = Hash(clientKeyBytes2) + fmt.Println("hash2 ", h) + + } \ No newline at end of file diff --git a/code/go/0chain.net/core/transaction/entity.go b/code/go/0chain.net/core/transaction/entity.go index 72f8eabe5..8be9e6c31 100644 --- a/code/go/0chain.net/core/transaction/entity.go +++ b/code/go/0chain.net/core/transaction/entity.go @@ -68,13 +68,19 @@ type StakePoolSettings struct { ServiceCharge float64 `json:"service_charge"` } +type StorageNodeGeolocation struct { + Latitude float64 `json:"latitude"` + Longitude float64 `json:"longitude"` +} + type StorageNode struct { - ID string `json:"id"` - BaseURL string `json:"url"` - Terms Terms `json:"terms"` - Capacity int64 `json:"capacity"` - PublicKey string `json:"-"` - StakePoolSettings StakePoolSettings `json:"stake_pool_settings"` + ID string `json:"id"` + BaseURL string `json:"url"` + Geolocation StorageNodeGeolocation `json:"geolocation"` + Terms Terms `json:"terms"` + Capacity int64 `json:"capacity"` + PublicKey string `json:"-"` + StakePoolSettings StakePoolSettings 
`json:"stake_pool_settings"` } type BlobberAllocation struct { @@ -95,6 +101,7 @@ type StorageAllocation struct { Finalized bool `json:"finalized"` CCT time.Duration `json:"challenge_completion_time"` TimeUnit time.Duration `json:"time_unit"` + IsImmutable bool `json:"is_immutable"` } func (sa *StorageAllocation) Until() common.Timestamp { @@ -115,7 +122,6 @@ const ( READ_REDEEM = "read_redeem" CHALLENGE_RESPONSE = "challenge_response" BLOBBER_HEALTH_CHECK = "blobber_health_check" - UPDATE_BLOBBER_SETTINGS = "update_blobber_settings" FINALIZE_ALLOCATION = "finalize_allocation" ) diff --git a/code/go/0chain.net/go.mod b/code/go/0chain.net/go.mod index ef3b0dd34..f6e01cca6 100644 --- a/code/go/0chain.net/go.mod +++ b/code/go/0chain.net/go.mod @@ -1,7 +1,7 @@ module 0chain.net require ( - github.com/0chain/gosdk v1.1.6 + github.com/0chain/gosdk marketplace github.com/DATA-DOG/go-sqlmock v1.5.0 github.com/didip/tollbooth v4.0.2+incompatible github.com/go-ini/ini v1.55.0 // indirect @@ -9,6 +9,7 @@ require ( github.com/gorilla/mux v1.7.3 github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 github.com/grpc-ecosystem/grpc-gateway/v2 v2.3.0 + github.com/herumi/bls-go-binary v0.0.0-20191119080710-898950e1a520 github.com/jackc/pgproto3/v2 v2.0.4 // indirect github.com/koding/cache v0.0.0-20161222233015-e8a81b0b3f20 github.com/minio/minio-go v6.0.14+incompatible diff --git a/code/go/0chain.net/go.sum b/code/go/0chain.net/go.sum index a0ecd3327..dec840020 100644 --- a/code/go/0chain.net/go.sum +++ b/code/go/0chain.net/go.sum @@ -334,7 +334,6 @@ github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eI github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.1.2 
h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.3.1 h1:cCBH2gTD2K0OtLlv/Y5H01VQCqmlDxz30kS5Y5bqfLA= github.com/mitchellh/mapstructure v1.3.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= @@ -350,7 +349,6 @@ github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTK github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -400,7 +398,6 @@ github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkU github.com/spf13/cobra v1.0.1-0.20201006035406-b97b5ead31f7/go.mod h1:yk5b0mALVusDL5fMM6Rd1wgnoO5jUPhwsQ6LQAJTidQ= github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= -github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= @@ -412,7 +409,6 @@ github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/testify v1.2.2/go.mod 
h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= @@ -446,22 +442,18 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.6/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A= go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/ratelimit v0.2.0 h1:UQE2Bgi7p2B85uP5dC2bbRtig0C+OeNRnNEafLjsLPA= go.uber.org/ratelimit v0.2.0/go.mod h1:YYBV4e4naJvhpitQrWJu1vCpgB7CboMe0qhltKt6mUg= -go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= 
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.15.0 h1:ZZCA22JRF2gQE5FoNmhmrf7jeJJ2uhqDUNRYKm8dvmM= go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= go.uber.org/zap v1.16.0 h1:uFRZXykJGK9lLY4HtgSw44DnIcAM+kRBP7x5m+NpAOM= go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ= @@ -475,7 +467,6 @@ golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550 h1:ObdrDkeb4kJdCP557AjRjq69pTHfNouLtWZG7j9rPN8= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -500,7 +491,6 @@ golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTk golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= 
golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= @@ -513,7 +503,6 @@ golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKG golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191209134235-331c550502dd h1:ePuNC7PZ6O5BzgPn9bZayERXBdfZjUYoXEf5BTfDfh8= golang.org/x/mod v0.1.1-0.20191209134235-331c550502dd/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4= @@ -614,7 +603,6 @@ golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9sn golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5 h1:i6eZZ+zk0SOf0xgBpEpPD18qWcJda6q1sxt3S0kzyUQ= @@ -657,7 +645,6 @@ golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod 
h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa h1:5E4dL8+NgFOgjwbTKz+OOEGGhP+ectTmF842l6KjupQ= golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= @@ -681,7 +668,6 @@ golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -778,7 +764,6 @@ gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s= -gopkg.in/ini.v1 v1.51.0 h1:AQvPpx3LzTDM0AjnIRlVFwFFGC+npRopjZxLJj6gdno= gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.61.0 h1:LBCdW4FmFYL4s/vDZD1RQYX7oAR6IjujCYgMdbHBR10= gopkg.in/ini.v1 
v1.61.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= @@ -790,7 +775,6 @@ gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -818,7 +802,6 @@ honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4 h1:UoveltGrhghAA7ePc+e+QYDHXrBps2PqFZiHkGR/xK8= diff --git a/code/go/0chain.net/validatorcore/storage/context.go b/code/go/0chain.net/validatorcore/storage/context.go index 79a977074..2a761fe4a 100644 --- a/code/go/0chain.net/validatorcore/storage/context.go +++ b/code/go/0chain.net/validatorcore/storage/context.go @@ -10,7 +10,8 @@ import ( func SetupContext(handler common.JSONResponderF) common.JSONResponderF { return func(ctx context.Context, r *http.Request) (interface{}, error) { ctx = context.WithValue(ctx, CLIENT_CONTEXT_KEY, 
r.Header.Get(common.ClientHeader)) - ctx = context.WithValue(ctx, CLIENT_KEY_CONTEXT_KEY, r.Header.Get(common.ClientKeyHeader)) + ctx = context.WithValue(ctx, CLIENT_KEY_CONTEXT_KEY, + r.Header.Get(common.ClientKeyHeader)) res, err := handler(ctx, r) return res, err } diff --git a/config/0chain_blobber.yaml b/config/0chain_blobber.yaml index 73201116e..70a5fbc51 100755 --- a/config/0chain_blobber.yaml +++ b/config/0chain_blobber.yaml @@ -87,6 +87,10 @@ db: host: postgres port: 5432 +geolocation: + latitude: 0 + longitude: 0 + minio: # Enable or disable minio backup service start: false diff --git a/docker.local/b0docker-compose.yml b/docker.local/b0docker-compose.yml index 6030b1930..af8ba85c1 100644 --- a/docker.local/b0docker-compose.yml +++ b/docker.local/b0docker-compose.yml @@ -62,7 +62,7 @@ services: ports: - "505${BLOBBER}:505${BLOBBER}" - "703${BLOBBER}:703${BLOBBER}" - command: ./bin/blobber --port 505${BLOBBER} --grpc_port 703${BLOBBER} --hostname localhost --deployment_mode 0 --keys_file keysconfig/b0bnode${BLOBBER}_keys.txt --files_dir /blobber/files --log_dir /blobber/log --db_dir /blobber/data --minio_file keysconfig/minio_config.txt + command: ./bin/blobber --port 505${BLOBBER} --grpc_port 703${BLOBBER} --hostname 198.18.0.9${BLOBBER} --deployment_mode 0 --keys_file keysconfig/b0bnode${BLOBBER}_keys.txt --files_dir /blobber/files --log_dir /blobber/log --db_dir /blobber/data --minio_file keysconfig/minio_config.txt networks: default: testnet0: diff --git a/docker.local/bin/build.blobber-integration-tests.sh b/docker.local/bin/build.blobber-integration-tests.sh index 69b76eff7..895d5974c 100755 --- a/docker.local/bin/build.blobber-integration-tests.sh +++ b/docker.local/bin/build.blobber-integration-tests.sh @@ -4,8 +4,21 @@ set -e GIT_COMMIT=$(git rev-list -1 HEAD) echo $GIT_COMMIT -docker build --build-arg GIT_COMMIT=$GIT_COMMIT -f docker.local/ValidatorDockerfile . 
-t validator -docker build --build-arg GIT_COMMIT=$GIT_COMMIT -f docker.local/IntegrationTestsBlobberDockerfile . -t blobber +cmd="build" + +for arg in "$@" +do + case $arg in + -m1|--m1|m1) + echo "The build will be performed for Apple M1 chip" + cmd="buildx build --platform linux/amd64" + shift + ;; + esac +done + +docker $cmd --build-arg GIT_COMMIT=$GIT_COMMIT -f docker.local/ValidatorDockerfile . -t validator +docker $cmd --build-arg GIT_COMMIT=$GIT_COMMIT -f docker.local/IntegrationTestsBlobberDockerfile . -t blobber for i in $(seq 1 6); do diff --git a/docker.local/bin/build.blobber.sh b/docker.local/bin/build.blobber.sh index 6b27f854c..4cd4fda82 100755 --- a/docker.local/bin/build.blobber.sh +++ b/docker.local/bin/build.blobber.sh @@ -4,8 +4,21 @@ set -e GIT_COMMIT=$(git rev-list -1 HEAD) echo $GIT_COMMIT -docker build --build-arg GIT_COMMIT=$GIT_COMMIT -f docker.local/ValidatorDockerfile . -t validator -docker build --build-arg GIT_COMMIT=$GIT_COMMIT -f docker.local/Dockerfile . -t blobber +cmd="build" + +for arg in "$@" +do + case $arg in + -m1|--m1|m1) + echo "The build will be performed for Apple M1 chip" + cmd="buildx build --platform linux/amd64" + shift + ;; + esac +done + +docker $cmd --build-arg GIT_COMMIT=$GIT_COMMIT -f docker.local/ValidatorDockerfile . -t validator +docker $cmd --build-arg GIT_COMMIT=$GIT_COMMIT -f docker.local/Dockerfile . -t blobber for i in $(seq 1 6); do diff --git a/docs/cicd/CICD_GITACTIONS.md b/docs/cicd/CICD_GITACTIONS.md new file mode 100644 index 000000000..3aecae196 --- /dev/null +++ b/docs/cicd/CICD_GITACTIONS.md @@ -0,0 +1,128 @@ + + +## Guide to CI/CD using github actions + +## Workflow Creation. + - A new workflow is created using Go project with the file name called "build.yml". + - By default the path of build.yml is ".github/workflows.build.yml" + - Completed or running CI/CD can be seen under actions option. + + +## Details of components being used in build.yml. 
+#### Workflow name +Here the name of the workflow is defined i.e. "Dockerize" +``` +name: Dockerize +``` + +#### Input Option to trigger manually builds +To run the workflow using manual option, *work_dispatch* is used. Which will ask for the input to tigger the builds with *latest* tag or not. If we select for **yes**, image will be build with *latest* tag as well as with *branch-commitid* tag. But if we select for **no**, image will be build with *branch-commitid* tag only. + +``` +on: + workflow_dispatch: + inputs: + latest_tag: + description: 'type yes for building latest tag' + default: 'no' + required: true +``` + +#### Global ENV setup +Environment variable is defined with the secrets added to the repository. Here secrets contains the docker images(example- dockerhub) repository name. +``` +env: + BLOBBER_REGISTRY: ${{ secrets.BLOBBER_REGISTRY }} + VALIDATOR_REGISTRY: ${{ secrets.VALIDATOR_REGISTRY }} +``` + +#### Defining jobs and runner +Jobs are defined which contains the various steps for creating and pushing the builds. Runner envionment is also defined used for making the builds. +``` +jobs: + dockerize_blobber: + runs-on: ubuntu-20.04 + ... + dockerize_validator: + runs-on: ubuntu-20.04 +``` + +#### Different steps used in creating the builds +Here different steps are defined used for creating the builds. + - *uses* --> checkout to branch from what code to create the builds. + - *Get the version* --> Creating the tags by combining the branch name & first 8 digits of commit id. + - *Login to Docker Hub* --> Logging into the docker hub using Username and Password from secrets of the repository. + - *Build blobber/validator* --> Building, tagging and pushing the docker images with the *Get the version* tag. + - *Push blobber/validator* --> Here we are checking if the input given by user is **yes**, images is also pushed with latest tag also. 
+ +For Blobber +``` +steps: +- uses: actions/checkout@v2 + +- name: Get the version + id: get_version + run: | + BRANCH=$(echo ${GITHUB_REF#refs/heads/} | sed 's/\//-/g') + SHORT_SHA=$(echo $GITHUB_SHA | head -c 8) + echo ::set-output name=BRANCH::${BRANCH} + echo ::set-output name=VERSION::${BRANCH}-${SHORT_SHA} + +- name: Login to Docker Hub + uses: docker/login-action@v1 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_PASSWORD }} + +- name: Build blobber + run: | + docker build -t $BLOBBER_REGISTRY:$TAG -f "$DOCKERFILE_BLOB" . + docker tag $BLOBBER_REGISTRY:$TAG $BLOBBER_REGISTRY:latest + ocker push $BLOBBER_REGISTRY:$TAG + env: + TAG: ${{ steps.get_version.outputs.VERSION }} + DOCKERFILE_BLOB: "docker.local/Dockerfile" + +- name: Push blobber + run: | + if [[ "$PUSH_LATEST" == "yes" ]]; then + docker push $BLOBBER_REGISTRY:latest + fi + env: + PUSH_LATEST: ${{ github.event.inputs.latest_tag }} +``` +For Validator +``` +steps: +- uses: actions/checkout@v1 + +- name: Get the version + id: get_version + run: | + BRANCH=$(echo ${GITHUB_REF#refs/heads/} | sed 's/\//-/g') + SHORT_SHA=$(echo $GITHUB_SHA | head -c 8) + echo ::set-output name=BRANCH::${BRANCH} + echo ::set-output name=VERSION::${BRANCH}-${SHORT_SHA} +- name: Login to Docker Hub + uses: docker/login-action@v1 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_PASSWORD }} + +- name: Build validator + run: | + docker build -t $VALIDATOR_REGISTRY:$TAG -f "$DOCKERFILE_PROXY" . 
+ docker tag $VALIDATOR_REGISTRY:$TAG $VALIDATOR_REGISTRY:latest + docker push $VALIDATOR_REGISTRY:$TAG + env: + TAG: ${{ steps.get_version.outputs.VERSION }} + DOCKERFILE_PROXY: "docker.local/ValidatorDockerfile" + +- name: Push validator + run: | + if [[ "$PUSH_LATEST" == "yes" ]]; then + docker push $VALIDATOR_REGISTRY:latest + fi + env: + PUSH_LATEST: ${{ github.event.inputs.latest_tag }} +``` diff --git a/docs/cicd/blobber.png b/docs/cicd/blobber.png new file mode 100644 index 000000000..14364278e Binary files /dev/null and b/docs/cicd/blobber.png differ diff --git a/docs/src/repair.plantuml b/docs/src/repair.plantuml index 94009b9b9..ffde5ea64 100644 --- a/docs/src/repair.plantuml +++ b/docs/src/repair.plantuml @@ -1,6 +1,6 @@ @startuml actor Client -actor Payer +actor Repairer group partial upload Client -> Blobber : List command on a directory diff --git a/sql/14-increase_owner_pubkey.sql b/sql/14-increase_owner_pubkey.sql new file mode 100644 index 000000000..f6cd40520 --- /dev/null +++ b/sql/14-increase_owner_pubkey.sql @@ -0,0 +1,16 @@ +-- +-- Increase the char limit of owner_public_key from 256 to 512. 
+-- + +-- pew-pew +\connect blobber_meta; + +-- in a transaction +BEGIN; + ALTER TABLE allocations + ALTER COLUMN owner_public_key TYPE varchar(512); + ALTER TABLE read_markers + ALTER COLUMN client_public_key TYPE varchar(512); + ALTER TABLE write_markers + ALTER COLUMN client_key TYPE varchar(512); +COMMIT; diff --git a/sql/15-add-allocation-columns.sql b/sql/15-add-allocation-columns.sql new file mode 100644 index 000000000..c983cf73f --- /dev/null +++ b/sql/15-add-allocation-columns.sql @@ -0,0 +1,6 @@ +\connect blobber_meta; + +BEGIN; + ALTER TABLE allocations ADD COLUMN repairer_id VARCHAR(64) NOT NULL; + ALTER TABLE allocations ADD COLUMN is_immutable BOOLEAN NOT NULL; +COMMIT; \ No newline at end of file diff --git a/sql/14-add-marketplace-table.sql b/sql/16-add-marketplace-table.sql similarity index 100% rename from sql/14-add-marketplace-table.sql rename to sql/16-add-marketplace-table.sql diff --git a/sql/15-add-indexes-to-reference-objects.sql b/sql/17-add-indexes-to-reference-objects.sql similarity index 100% rename from sql/15-add-indexes-to-reference-objects.sql rename to sql/17-add-indexes-to-reference-objects.sql