diff --git a/.github/workflows/all.yml b/.github/workflows/all.yml
index 6ce7157..515ffd0 100644
--- a/.github/workflows/all.yml
+++ b/.github/workflows/all.yml
@@ -9,19 +9,39 @@ on:
       - '*'
 
 jobs:
-  test:
+  test_lib:
     runs-on: ubuntu-latest
     strategy:
       matrix:
         go_version: [ '1.18', '1.19' ]
     steps:
-      - uses: actions/checkout@v2
-      - uses: actions/setup-go@v1
+      - uses: actions/checkout@v3
+      - uses: actions/setup-go@v3
        with:
          go-version: ${{ matrix.go_version }}
 
-      - name: Install go dependencies
-        run: go get -t -v ./...
+      - name: Run library go tests
+        run: cd lib; go test -v -race -cover -coverprofile=coverage.txt -covermode=atomic ./...
 
-      - name: Run go tests
-        run: go test -v -race -cover -coverprofile=coverage.txt -covermode=atomic ./...
+  test_stores:
+    runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        go_version: [ '1.18', '1.19' ]
+        store:
+          - bigcache
+          - freecache
+          - go_cache
+          - memcache
+          - pegasus
+          - redis
+          - rediscluster
+          - ristretto
+    steps:
+      - uses: actions/checkout@v3
+      - uses: actions/setup-go@v3
+        with:
+          go-version: ${{ matrix.go_version }}
+
+      - name: Run stores go tests
+        run: cd store/${{ matrix.store }}; go test -v -race ./...
diff --git a/LICENSE b/LICENSE
index 4aa8e70..902e842 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,4 +1,4 @@
-Copyright (c) 2021 Vincent Composieux
+Copyright (c) 2022 Vincent Composieux
 
 Permission is hereby granted, free of charge, to any person obtaining a copy of
 this software and associated documentation files (the "Software"), to deal
diff --git a/Makefile b/Makefile
index 21df541..31ba1a7 100644
--- a/Makefile
+++ b/Makefile
@@ -1,18 +1,25 @@
 .PHONY: mocks test benchmark-store
 
 mocks:
-	mockgen -source=cache/interface.go -destination=test/mocks/cache/cache_interface.go -package=mocks
-	mockgen -source=codec/interface.go -destination=test/mocks/codec/codec_interface.go -package=mocks
-	mockgen -source=metrics/interface.go -destination=test/mocks/metrics/metrics_interface.go -package=mocks
-	mockgen -source=store/interface.go -destination=test/mocks/store/store_interface.go -package=mocks
-	mockgen -source=store/bigcache.go -destination=test/mocks/store/clients/bigcache_interface.go -package=mocks
-	mockgen -source=store/memcache.go -destination=test/mocks/store/clients/memcache_interface.go -package=mocks
-	mockgen -source=store/redis.go -destination=test/mocks/store/clients/redis_interface.go -package=mocks
-	mockgen -source=store/rediscluster.go -destination=test/mocks/store/clients/rediscluster_interface.go -package=mocks
-	mockgen -source=store/ristretto.go -destination=test/mocks/store/clients/ristretto_interface.go -package=mocks
-	mockgen -source=store/freecache.go -destination=test/mocks/store/clients/freecache_interface.go -package=mocks
-	mockgen -source=store/go_cache.go -destination=test/mocks/store/clients/go_cache_interface.go -package=mocks
+	mockgen -source=cache/interface.go -destination=lib/cache/cache_mock.go -package=cache
+	mockgen -source=codec/interface.go -destination=lib/codec/codec_mock.go -package=codec
+	mockgen -source=metrics/interface.go -destination=lib/metrics/metrics_mock.go -package=metrics
+	mockgen -source=store/interface.go -destination=lib/store/store_mock.go -package=store
+	mockgen -source=store/bigcache/bigcache.go -destination=store/bigcache_mock.go -package=bigcache
+	mockgen -source=store/memcache/memcache.go -destination=store/memcache_mock.go -package=memcache
+	mockgen -source=store/redis/redis.go -destination=store/redis_mock.go -package=redis
+	mockgen -source=store/rediscluster/rediscluster.go
-destination=store/rediscluster_mock.go -package=rediscluster
+	mockgen -source=store/ristretto/ristretto.go -destination=store/ristretto_mock.go -package=ristretto
+	mockgen -source=store/freecache/freecache.go -destination=store/freecache_mock.go -package=freecache
+	mockgen -source=store/go_cache/go_cache.go -destination=store/go_cache_mock.go -package=go_cache
+
 test:
-	GOGC=10 go test -p=4 ./...
-benchmark-store:
-	cd store && go test -bench=. -benchmem -benchtime=1s -count=1 -run=none
\ No newline at end of file
+	cd lib; GOGC=10 go test -v -p=4 ./...
+	cd store/bigcache; GOGC=10 go test -v -p=4 ./...
+	cd store/freecache; GOGC=10 go test -v -p=4 ./...
+	cd store/go_cache; GOGC=10 go test -v -p=4 ./...
+	cd store/memcache; GOGC=10 go test -v -p=4 ./...
+	cd store/pegasus; GOGC=10 go test -v -p=4 ./...
+	cd store/redis; GOGC=10 go test -v -p=4 ./...
+	cd store/rediscluster; GOGC=10 go test -v -p=4 ./...
+	cd store/ristretto; GOGC=10 go test -v -p=4 ./...
diff --git a/README.md b/README.md
index f38087c..4cbb120 100644
--- a/README.md
+++ b/README.md
@@ -37,6 +37,38 @@ Here is what it brings in detail:
 * [Prometheus](https://github.com/prometheus/client_golang)
 
+## Installation
+
+To begin working with the latest version of gocache, install the core library module in your project:
+
+```bash
+go get github.com/eko/gocache/v4/lib
+```
+
+and then, install the store(s) you want to use among the available ones:
+
+```bash
+go get github.com/eko/gocache/v4/store/bigcache
+go get github.com/eko/gocache/v4/store/freecache
+go get github.com/eko/gocache/v4/store/go_cache
+go get github.com/eko/gocache/v4/store/memcache
+go get github.com/eko/gocache/v4/store/pegasus
+go get github.com/eko/gocache/v4/store/redis
+go get github.com/eko/gocache/v4/store/rediscluster
+go get github.com/eko/gocache/v4/store/ristretto
+```
+
+Then, simply use the following import statements:
+
+```go
+import (
+	"github.com/eko/gocache/v4/lib/cache"
+	"github.com/eko/gocache/v4/store/redis"
+)
+```
+
+If you run into any errors, please be sure to run `go mod tidy` to clean your go.mod file.
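For reviewers of the new multi-module layout, here is a minimal end-to-end sketch of how the v4 imports above fit together. It only reuses calls that already appear in the README examples in this diff (`redis_store.NewRedis`, `store.WithExpiration`, `cache.New`); the `redis_store` alias and the go-redis v8 client are assumptions based on those examples and the removed v3 go.mod, not versions pinned by this patch.

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/eko/gocache/v4/lib/cache"
	"github.com/eko/gocache/v4/lib/store"
	redis_store "github.com/eko/gocache/v4/store/redis"
	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()

	// The Redis store now lives in its own module; the alias avoids a
	// name clash with the go-redis client package (assumed to be v8 here).
	redisStore := redis_store.NewRedis(redis.NewClient(&redis.Options{
		Addr: "127.0.0.1:6379",
	}), store.WithExpiration(5*time.Second))

	// The generic cache manager still comes from the core lib module.
	cacheManager := cache.New[string](redisStore)

	if err := cacheManager.Set(ctx, "my-key", "my-value"); err != nil {
		panic(err)
	}

	value, err := cacheManager.Get(ctx, "my-key")
	if err != nil {
		panic(err)
	}
	fmt.Println(value)
}
```

Because each store ships its own go.mod, a project that imports only `store/redis` no longer pulls the Pegasus, Memcache or Ristretto client dependencies into its module graph.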
+ ## Available cache features in detail ### A simple cache @@ -46,7 +78,7 @@ Here is a simple cache instantiation with Redis but you can also look at other a #### Memcache ```go -memcacheStore := store.NewMemcache( +memcacheStore := memcache_store.NewMemcache( memcache.New("10.0.0.1:11211", "10.0.0.2:11211", "10.0.0.3:11212"), store.WithExpiration(10*time.Second), ) @@ -70,7 +102,7 @@ cacheManager.Clear(ctx) // Clears the entire cache, in case you want to flush al ```go bigcacheClient, _ := bigcache.NewBigCache(bigcache.DefaultConfig(5 * time.Minute)) -bigcacheStore := store.NewBigcache(bigcacheClient) +bigcacheStore := bigcache_store.NewBigcache(bigcacheClient) cacheManager := cache.New[[]byte](bigcacheStore) err := cacheManager.Set(ctx, "my-key", []byte("my-value")) @@ -92,7 +124,7 @@ ristrettoCache, err := ristretto.NewCache(&ristretto.Config{ if err != nil { panic(err) } -ristrettoStore := store.NewRistretto(ristrettoCache) +ristrettoStore := ristretto_store.NewRistretto(ristrettoCache) cacheManager := cache.New[string](ristrettoStore) err := cacheManager.Set(ctx, "my-key", "my-value", store.WithCost(2)) @@ -109,7 +141,7 @@ cacheManager.Delete(ctx, "my-key") ```go gocacheClient := gocache.New(5*time.Minute, 10*time.Minute) -gocacheStore := store.NewGoCache(gocacheClient) +gocacheStore := gocache_store.NewGoCache(gocacheClient) cacheManager := cache.New[[]byte](gocacheStore) err := cacheManager.Set(ctx, "my-key", []byte("my-value")) @@ -127,7 +159,7 @@ fmt.Printf("%s", value) #### Redis ```go -redisStore := store.NewRedis(redis.NewClient(&redis.Options{ +redisStore := redis_store.NewRedis(redis.NewClient(&redis.Options{ Addr: "127.0.0.1:6379", })) @@ -151,7 +183,7 @@ switch err { #### Freecache ```go -freecacheStore := store.NewFreecache(freecache.NewCache(1000), store.WithExpiration(10 * time.Second)) +freecacheStore := freecache_store.NewFreecache(freecache.NewCache(1000), store.WithExpiration(10 * time.Second)) cacheManager := cache.New[[]byte](freecacheStore) err := cacheManager.Set(ctx, "by-key", []byte("my-value"), opts) @@ -165,7 +197,7 @@ value := cacheManager.Get(ctx, "my-key") #### Pegasus ```go -pegasusStore, err := store.NewPegasus(&store.OptionsPegasus{ +pegasusStore, err := pegasus_store.NewPegasus(&store.OptionsPegasus{ MetaServers: []string{"127.0.0.1:34601", "127.0.0.1:34602", "127.0.0.1:34603"}, }) @@ -197,8 +229,8 @@ if err != nil { redisClient := redis.NewClient(&redis.Options{Addr: "127.0.0.1:6379"}) // Initialize stores -ristrettoStore := store.NewRistretto(ristrettoCache) -redisStore := store.NewRedis(redisClient, store.WithExpiration(5*time.Second)) +ristrettoStore := ristretto_store.NewRistretto(ristrettoCache) +redisStore := redis_store.NewRedis(redisClient, store.WithExpiration(5*time.Second)) // Initialize chained cache cacheManager := cache.NewChain[any]( @@ -223,7 +255,7 @@ type Book struct { // Initialize Redis client and store redisClient := redis.NewClient(&redis.Options{Addr: "127.0.0.1:6379"}) -redisStore := store.NewRedis(redisClient) +redisStore := redis_store.NewRedis(redisClient) // Initialize a load function that loads your data from a custom source loadFunction := func(ctx context.Context, key any) (*Book, error) { @@ -249,7 +281,7 @@ This cache will record metrics depending on the metric provider you pass to it. 
```go // Initialize Redis client and store redisClient := redis.NewClient(&redis.Options{Addr: "127.0.0.1:6379"}) -redisStore := store.NewRedis(redisClient) +redisStore := redis_store.NewRedis(redisClient) // Initializes Prometheus metrics service promMetrics := metrics.NewPrometheus("my-test-app") @@ -270,7 +302,7 @@ Some caches like Redis stores and returns the value as a string so you have to m ```go // Initialize Redis client and store redisClient := redis.NewClient(&redis.Options{Addr: "127.0.0.1:6379"}) -redisStore := store.NewRedis(redisClient) +redisStore := redis_store.NewRedis(redisClient) // Initialize chained cache cacheManager := cache.NewMetric[any]( @@ -313,7 +345,7 @@ Here is an example on how to use it: ```go // Initialize Redis client and store redisClient := redis.NewClient(&redis.Options{Addr: "127.0.0.1:6379"}) -redisStore := store.NewRedis(redisClient) +redisStore := redis_store.NewRedis(redisClient) // Initialize chained cache cacheManager := cache.NewMetric[*Book]( @@ -353,13 +385,13 @@ package main import ( "log" - "github.com/eko/gocache/v3/generic" - "github.com/eko/gocache/v3/cache" - "github.com/eko/gocache/v3/store" + "github.com/eko/gocache/v4/lib/generic" + "github.com/eko/gocache/v4/lib/cache" + "github.com/eko/gocache/v4/lib/store" ) func main() { - redisStore := store.NewRedis(redis.NewClient(&redis.Options{ + redisStore := redis_store.NewRedis(redis.NewClient(&redis.Options{ Addr: "127.0.0.1:6379", }), nil) @@ -378,26 +410,6 @@ func main() { } ``` -## Installation - -To begin working with the latest version of go-cache, you can use the following command: - -```go -go get github.com/eko/gocache/v3 -``` - -To avoid any errors when trying to import your libraries use the following import statement: - -```go -import ( - "github.com/eko/gocache/v3/cache" - "github.com/eko/gocache/v3/store" -) -``` - -If you run into any errors, please be sure to run `go mod tidy` to clean your go.mod file. - - ### Write your own custom cache Cache respect the following interface so you can write your own (proprietary?) 
cache logic if needed by implementing the following interface:
@@ -460,9 +472,9 @@ type CacheKeyGenerator interface {
 
 ## Run tests
 
-Generate mocks:
+To generate mocks using the mockgen library, run:
+
 ```bash
-$ go get github.com/golang/mock/mockgen
 $ make mocks
 ```
 
@@ -470,7 +482,6 @@ Test suite can be run with:
 
 ```bash
 $ make test # run unit test
-$ make benchmark-store # run benchmark test
 ```
 
 ## Community
diff --git a/go.mod b/go.mod
deleted file mode 100644
index aa75be6..0000000
--- a/go.mod
+++ /dev/null
@@ -1,51 +0,0 @@
-module github.com/eko/gocache/v3
-
-go 1.19
-
-require (
-	github.com/XiaoMi/pegasus-go-client v0.0.0-20210427083443-f3b6b08bc4c2
-	github.com/allegro/bigcache/v3 v3.1.0
-	github.com/bradfitz/gomemcache v0.0.0-20221031212613-62deef7fc822
-	github.com/coocood/freecache v1.2.3
-	github.com/dgraph-io/ristretto v0.1.1
-	github.com/go-redis/redis/v8 v8.11.5
-	github.com/golang/mock v1.6.0
-	github.com/patrickmn/go-cache v2.1.0+incompatible
-	github.com/prometheus/client_golang v1.14.0
-	github.com/smartystreets/assertions v1.13.0
-	github.com/smartystreets/goconvey v1.7.2
-	github.com/spf13/cast v1.5.0
-	github.com/stretchr/testify v1.8.1
-	github.com/vmihailenco/msgpack v4.0.4+incompatible
-	golang.org/x/exp v0.0.0-20221110155412-d0897a79cd37
-	golang.org/x/sync v0.1.0
-)
-
-require (
-	github.com/beorn7/perks v1.0.1 // indirect
-	github.com/cenkalti/backoff/v4 v4.1.3 // indirect
-	github.com/cespare/xxhash/v2 v2.1.2 // indirect
-	github.com/davecgh/go-spew v1.1.1 // indirect
-	github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
-	github.com/dustin/go-humanize v1.0.0 // indirect
-	github.com/golang/glog v1.0.0 // indirect
-	github.com/golang/protobuf v1.5.2 // indirect
-	github.com/gopherjs/gopherjs v1.17.2 // indirect
-	github.com/jtolds/gls v4.20.0+incompatible // indirect
-	github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
-	github.com/pegasus-kv/thrift v0.13.0 // indirect
-	github.com/pkg/errors v0.9.1 // indirect
-	github.com/pmezard/go-difflib v1.0.0 // indirect
-	github.com/prometheus/client_model v0.3.0 // indirect
-	github.com/prometheus/common v0.37.0 // indirect
-	github.com/prometheus/procfs v0.8.0 // indirect
-	github.com/sirupsen/logrus v1.9.0 // indirect
-	golang.org/x/net v0.2.0 // indirect
-	golang.org/x/sys v0.2.0 // indirect
-	google.golang.org/appengine v1.6.7 // indirect
-	google.golang.org/protobuf v1.28.1 // indirect
-	gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect
-	gopkg.in/tomb.v2 v2.0.0-20161208151619-d5d1b5820637 // indirect
-	gopkg.in/yaml.v3 v3.0.1 // indirect
-	k8s.io/apimachinery v0.25.3 // indirect
-)
diff --git a/cache/cache.go b/lib/cache/cache.go
similarity index 97%
rename from cache/cache.go
rename to lib/cache/cache.go
index 22e79f6..2459a92 100644
--- a/cache/cache.go
+++ b/lib/cache/cache.go
@@ -7,8 +7,8 @@ import (
 	"reflect"
 	"time"
 
-	"github.com/eko/gocache/v3/codec"
-	"github.com/eko/gocache/v3/store"
+	"github.com/eko/gocache/v4/lib/codec"
+	"github.com/eko/gocache/v4/lib/store"
 )
 
 const (
diff --git a/test/mocks/cache/cache_interface.go b/lib/cache/cache_mock.go
similarity index 99%
rename from test/mocks/cache/cache_interface.go
rename to lib/cache/cache_mock.go
index 33ddec5..dc44794 100644
--- a/test/mocks/cache/cache_interface.go
+++ b/lib/cache/cache_mock.go
@@ -2,15 +2,15 @@
 // Source: cache/interface.go
 
 // Package mocks is a generated GoMock package.
-package mocks +package cache import ( context "context" reflect "reflect" time "time" - codec "github.com/eko/gocache/v3/codec" - store "github.com/eko/gocache/v3/store" + codec "github.com/eko/gocache/v4/lib/codec" + store "github.com/eko/gocache/v4/lib/store" gomock "github.com/golang/mock/gomock" ) diff --git a/cache/cache_test.go b/lib/cache/cache_test.go similarity index 86% rename from cache/cache_test.go rename to lib/cache/cache_test.go index 698e906..f1ba238 100644 --- a/cache/cache_test.go +++ b/lib/cache/cache_test.go @@ -6,9 +6,8 @@ import ( "testing" "time" - "github.com/eko/gocache/v3/codec" - "github.com/eko/gocache/v3/store" - mocksStore "github.com/eko/gocache/v3/test/mocks/store" + "github.com/eko/gocache/v4/lib/codec" + "github.com/eko/gocache/v4/lib/store" "github.com/golang/mock/gomock" "github.com/stretchr/testify/assert" ) @@ -17,7 +16,7 @@ func TestNew(t *testing.T) { // Given ctrl := gomock.NewController(t) - store := mocksStore.NewMockStoreInterface(ctrl) + store := store.NewMockStoreInterface(ctrl) // When cache := New[any](store) @@ -41,7 +40,7 @@ func TestCacheSet(t *testing.T) { Hello: "world", } - mockedStore := mocksStore.NewMockStoreInterface(ctrl) + mockedStore := store.NewMockStoreInterface(ctrl) mockedStore.EXPECT().Set(ctx, "my-key", value, store.OptionsMatcher{ Expiration: 5 * time.Second, }).Return(nil) @@ -67,7 +66,7 @@ func TestCacheSetWhenErrorOccurs(t *testing.T) { storeErr := errors.New("an error has occurred while inserting data into store") - mockedStore := mocksStore.NewMockStoreInterface(ctrl) + mockedStore := store.NewMockStoreInterface(ctrl) mockedStore.EXPECT().Set(ctx, "my-key", value, store.OptionsMatcher{ Expiration: 5 * time.Second, }).Return(storeErr) @@ -91,7 +90,7 @@ func TestCacheGet(t *testing.T) { Hello: "world", } - store := mocksStore.NewMockStoreInterface(ctrl) + store := store.NewMockStoreInterface(ctrl) store.EXPECT().Get(ctx, "my-key").Return(cacheValue, nil) cache := New[any](store) @@ -112,7 +111,7 @@ func TestCacheGetWhenNotFound(t *testing.T) { returnedErr := errors.New("unable to find item in store") - store := mocksStore.NewMockStoreInterface(ctrl) + store := store.NewMockStoreInterface(ctrl) store.EXPECT().Get(ctx, "my-key").Return(nil, returnedErr) cache := New[any](store) @@ -138,7 +137,7 @@ func TestCacheGetWithTTL(t *testing.T) { } expiration := 1 * time.Second - store := mocksStore.NewMockStoreInterface(ctrl) + store := store.NewMockStoreInterface(ctrl) store.EXPECT().GetWithTTL(ctx, "my-key"). Return(cacheValue, expiration, nil) @@ -162,7 +161,7 @@ func TestCacheGetWithTTLWhenNotFound(t *testing.T) { returnedErr := errors.New("unable to find item in store") expiration := 0 * time.Second - store := mocksStore.NewMockStoreInterface(ctrl) + store := store.NewMockStoreInterface(ctrl) store.EXPECT().GetWithTTL(ctx, "my-key"). 
Return(nil, expiration, returnedErr) @@ -181,7 +180,7 @@ func TestCacheGetCodec(t *testing.T) { // Given ctrl := gomock.NewController(t) - store := mocksStore.NewMockStoreInterface(ctrl) + store := store.NewMockStoreInterface(ctrl) cache := New[any](store) @@ -197,7 +196,7 @@ func TestCacheGetType(t *testing.T) { // Given ctrl := gomock.NewController(t) - store := mocksStore.NewMockStoreInterface(ctrl) + store := store.NewMockStoreInterface(ctrl) cache := New[any](store) @@ -209,7 +208,7 @@ func TestCacheGetCacheKeyWhenKeyIsString(t *testing.T) { // Given ctrl := gomock.NewController(t) - store := mocksStore.NewMockStoreInterface(ctrl) + store := store.NewMockStoreInterface(ctrl) cache := New[any](store) @@ -224,7 +223,7 @@ func TestCacheGetCacheKeyWhenKeyIsStruct(t *testing.T) { // Given ctrl := gomock.NewController(t) - store := mocksStore.NewMockStoreInterface(ctrl) + store := store.NewMockStoreInterface(ctrl) cache := New[any](store) @@ -251,7 +250,7 @@ func TestCacheGetCacheKeyWhenKeyImplementsGenerator(t *testing.T) { // Given ctrl := gomock.NewController(t) - store := mocksStore.NewMockStoreInterface(ctrl) + store := store.NewMockStoreInterface(ctrl) cache := New[any](store) @@ -269,7 +268,7 @@ func TestCacheDelete(t *testing.T) { ctx := context.Background() - store := mocksStore.NewMockStoreInterface(ctrl) + store := store.NewMockStoreInterface(ctrl) store.EXPECT().Delete(ctx, "my-key").Return(nil) cache := New[any](store) @@ -287,7 +286,7 @@ func TestCacheInvalidate(t *testing.T) { ctx := context.Background() - mockedStore := mocksStore.NewMockStoreInterface(ctrl) + mockedStore := store.NewMockStoreInterface(ctrl) mockedStore.EXPECT().Invalidate(ctx, store.InvalidateOptionsMatcher{ Tags: []string{"tag1"}, }).Return(nil) @@ -309,7 +308,7 @@ func TestCacheInvalidateWhenError(t *testing.T) { expectedErr := errors.New("unexpected error during invalidation") - mockedStore := mocksStore.NewMockStoreInterface(ctrl) + mockedStore := store.NewMockStoreInterface(ctrl) mockedStore.EXPECT().Invalidate(ctx, store.InvalidateOptionsMatcher{ Tags: []string{"tag1"}, }).Return(expectedErr) @@ -329,7 +328,7 @@ func TestCacheClear(t *testing.T) { ctx := context.Background() - store := mocksStore.NewMockStoreInterface(ctrl) + store := store.NewMockStoreInterface(ctrl) store.EXPECT().Clear(ctx).Return(nil) cache := New[any](store) @@ -349,7 +348,7 @@ func TestCacheClearWhenError(t *testing.T) { expectedErr := errors.New("unexpected error during invalidation") - store := mocksStore.NewMockStoreInterface(ctrl) + store := store.NewMockStoreInterface(ctrl) store.EXPECT().Clear(ctx).Return(expectedErr) cache := New[any](store) @@ -369,7 +368,7 @@ func TestCacheDeleteWhenError(t *testing.T) { expectedErr := errors.New("unable to delete key") - store := mocksStore.NewMockStoreInterface(ctrl) + store := store.NewMockStoreInterface(ctrl) store.EXPECT().Delete(ctx, "my-key").Return(expectedErr) cache := New[any](store) diff --git a/cache/chain.go b/lib/cache/chain.go similarity index 98% rename from cache/chain.go rename to lib/cache/chain.go index aa01e74..2bb2355 100644 --- a/cache/chain.go +++ b/lib/cache/chain.go @@ -6,7 +6,7 @@ import ( "fmt" "time" - "github.com/eko/gocache/v3/store" + "github.com/eko/gocache/v4/lib/store" ) const ( diff --git a/cache/chain_test.go b/lib/cache/chain_test.go similarity index 76% rename from cache/chain_test.go rename to lib/cache/chain_test.go index 51ba0d5..6df9d90 100644 --- a/cache/chain_test.go +++ b/lib/cache/chain_test.go @@ -7,10 +7,8 @@ import ( "testing" "time" - 
"github.com/eko/gocache/v3/store" - mocksCache "github.com/eko/gocache/v3/test/mocks/cache" - mocksCodec "github.com/eko/gocache/v3/test/mocks/codec" - mocksStore "github.com/eko/gocache/v3/test/mocks/store" + "github.com/eko/gocache/v4/lib/codec" + "github.com/eko/gocache/v4/lib/store" "github.com/golang/mock/gomock" "github.com/stretchr/testify/assert" ) @@ -19,8 +17,8 @@ func TestNewChain(t *testing.T) { // Given ctrl := gomock.NewController(t) - cache1 := mocksCache.NewMockSetterCacheInterface[any](ctrl) - cache2 := mocksCache.NewMockSetterCacheInterface[any](ctrl) + cache1 := NewMockSetterCacheInterface[any](ctrl) + cache2 := NewMockSetterCacheInterface[any](ctrl) // When cache := NewChain[any](cache1, cache2) @@ -35,8 +33,8 @@ func TestChainGetCaches(t *testing.T) { // Given ctrl := gomock.NewController(t) - cache1 := mocksCache.NewMockSetterCacheInterface[any](ctrl) - cache2 := mocksCache.NewMockSetterCacheInterface[any](ctrl) + cache1 := NewMockSetterCacheInterface[any](ctrl) + cache2 := NewMockSetterCacheInterface[any](ctrl) cache := NewChain[any](cache1, cache2) @@ -63,19 +61,19 @@ func TestChainGetWhenAvailableInFirstCache(t *testing.T) { } // Cache 1 - store1 := mocksStore.NewMockStoreInterface(ctrl) + store1 := store.NewMockStoreInterface(ctrl) store1.EXPECT().GetType().AnyTimes().Return("store1") - codec1 := mocksCodec.NewMockCodecInterface(ctrl) + codec1 := codec.NewMockCodecInterface(ctrl) codec1.EXPECT().GetStore().AnyTimes().Return(store1) - cache1 := mocksCache.NewMockSetterCacheInterface[any](ctrl) + cache1 := NewMockSetterCacheInterface[any](ctrl) cache1.EXPECT().GetCodec().AnyTimes().Return(codec1) cache1.EXPECT().GetWithTTL(ctx, "my-key").Return(cacheValue, 0*time.Second, nil) // Cache 2 - cache2 := mocksCache.NewMockSetterCacheInterface[any](ctrl) + cache2 := NewMockSetterCacheInterface[any](ctrl) cache := NewChain[any](cache1, cache2) @@ -103,26 +101,26 @@ func TestChainGetWhenAvailableInSecondCache(t *testing.T) { } // Cache 1 - store1 := mocksStore.NewMockStoreInterface(ctrl) + store1 := store.NewMockStoreInterface(ctrl) store1.EXPECT().GetType().AnyTimes().Return("store1") - codec1 := mocksCodec.NewMockCodecInterface(ctrl) + codec1 := codec.NewMockCodecInterface(ctrl) codec1.EXPECT().GetStore().AnyTimes().Return(store1) - cache1 := mocksCache.NewMockSetterCacheInterface[any](ctrl) + cache1 := NewMockSetterCacheInterface[any](ctrl) cache1.EXPECT().GetCodec().AnyTimes().Return(codec1) cache1.EXPECT().GetWithTTL(ctx, "my-key").Return(nil, 0*time.Second, errors.New("unable to find in cache 1")) cache1.EXPECT().Set(ctx, "my-key", cacheValue, &store.OptionsMatcher{}).AnyTimes().Return(nil) // Cache 2 - store2 := mocksStore.NewMockStoreInterface(ctrl) + store2 := store.NewMockStoreInterface(ctrl) store2.EXPECT().GetType().AnyTimes().Return("store2") - codec2 := mocksCodec.NewMockCodecInterface(ctrl) + codec2 := codec.NewMockCodecInterface(ctrl) codec2.EXPECT().GetStore().AnyTimes().Return(store2) - cache2 := mocksCache.NewMockSetterCacheInterface[any](ctrl) + cache2 := NewMockSetterCacheInterface[any](ctrl) cache2.EXPECT().GetCodec().AnyTimes().Return(codec2) cache2.EXPECT().GetWithTTL(ctx, "my-key").Return(cacheValue, 0*time.Second, nil) @@ -147,25 +145,25 @@ func TestChainGetWhenNotAvailableInAnyCache(t *testing.T) { ctx := context.Background() // Cache 1 - store1 := mocksStore.NewMockStoreInterface(ctrl) + store1 := store.NewMockStoreInterface(ctrl) store1.EXPECT().GetType().Return("store1") - codec1 := mocksCodec.NewMockCodecInterface(ctrl) + codec1 := 
codec.NewMockCodecInterface(ctrl) codec1.EXPECT().GetStore().Return(store1) - cache1 := mocksCache.NewMockSetterCacheInterface[any](ctrl) + cache1 := NewMockSetterCacheInterface[any](ctrl) cache1.EXPECT().GetCodec().Return(codec1) cache1.EXPECT().GetWithTTL(ctx, "my-key").Return(nil, 0*time.Second, errors.New("unable to find in cache 1")) // Cache 2 - store2 := mocksStore.NewMockStoreInterface(ctrl) + store2 := store.NewMockStoreInterface(ctrl) store2.EXPECT().GetType().Return("store2") - codec2 := mocksCodec.NewMockCodecInterface(ctrl) + codec2 := codec.NewMockCodecInterface(ctrl) codec2.EXPECT().GetStore().Return(store2) - cache2 := mocksCache.NewMockSetterCacheInterface[any](ctrl) + cache2 := NewMockSetterCacheInterface[any](ctrl) cache2.EXPECT().GetCodec().Return(codec2) cache2.EXPECT().GetWithTTL(ctx, "my-key").Return(nil, 0*time.Second, errors.New("unable to find in cache 2")) @@ -196,11 +194,11 @@ func TestChainSet(t *testing.T) { } // Cache 1 - cache1 := mocksCache.NewMockSetterCacheInterface[any](ctrl) + cache1 := NewMockSetterCacheInterface[any](ctrl) cache1.EXPECT().Set(ctx, "my-key", cacheValue).Return(nil) // Cache 2 - cache2 := mocksCache.NewMockSetterCacheInterface[any](ctrl) + cache2 := NewMockSetterCacheInterface[any](ctrl) cache2.EXPECT().Set(ctx, "my-key", cacheValue).Return(nil) cache := NewChain[any](cache1, cache2) @@ -227,18 +225,18 @@ func TestChainSetWhenErrorOnSetting(t *testing.T) { expectedErr := errors.New("an unexpected error occurred while setting data") // Cache 1 - store1 := mocksStore.NewMockStoreInterface(ctrl) + store1 := store.NewMockStoreInterface(ctrl) store1.EXPECT().GetType().Return("store1") - codec1 := mocksCodec.NewMockCodecInterface(ctrl) + codec1 := codec.NewMockCodecInterface(ctrl) codec1.EXPECT().GetStore().Return(store1) - cache1 := mocksCache.NewMockSetterCacheInterface[any](ctrl) + cache1 := NewMockSetterCacheInterface[any](ctrl) cache1.EXPECT().GetCodec().Return(codec1) cache1.EXPECT().Set(ctx, "my-key", cacheValue).Return(expectedErr) // Cache 2 - cache2 := mocksCache.NewMockSetterCacheInterface[any](ctrl) + cache2 := NewMockSetterCacheInterface[any](ctrl) cache2.EXPECT().Set(ctx, "my-key", cacheValue) cache := NewChain[any](cache1, cache2) @@ -258,11 +256,11 @@ func TestChainDelete(t *testing.T) { ctx := context.Background() // Cache 1 - cache1 := mocksCache.NewMockSetterCacheInterface[any](ctrl) + cache1 := NewMockSetterCacheInterface[any](ctrl) cache1.EXPECT().Delete(ctx, "my-key").Return(nil) // Cache 2 - cache2 := mocksCache.NewMockSetterCacheInterface[any](ctrl) + cache2 := NewMockSetterCacheInterface[any](ctrl) cache2.EXPECT().Delete(ctx, "my-key").Return(nil) cache := NewChain[any](cache1, cache2) @@ -281,11 +279,11 @@ func TestChainDeleteWhenError(t *testing.T) { ctx := context.Background() // Cache 1 - cache1 := mocksCache.NewMockSetterCacheInterface[any](ctrl) + cache1 := NewMockSetterCacheInterface[any](ctrl) cache1.EXPECT().Delete(ctx, "my-key").Return(errors.New("an error has occurred while deleting key")) // Cache 2 - cache2 := mocksCache.NewMockSetterCacheInterface[any](ctrl) + cache2 := NewMockSetterCacheInterface[any](ctrl) cache2.EXPECT().Delete(ctx, "my-key").Return(nil) cache := NewChain[any](cache1, cache2) @@ -304,11 +302,11 @@ func TestChainInvalidate(t *testing.T) { ctx := context.Background() // Cache 1 - cache1 := mocksCache.NewMockSetterCacheInterface[any](ctrl) + cache1 := NewMockSetterCacheInterface[any](ctrl) cache1.EXPECT().Invalidate(ctx).Return(nil) // Cache 2 - cache2 := 
mocksCache.NewMockSetterCacheInterface[any](ctrl) + cache2 := NewMockSetterCacheInterface[any](ctrl) cache2.EXPECT().Invalidate(ctx).Return(nil) cache := NewChain[any](cache1, cache2) @@ -327,11 +325,11 @@ func TestChainInvalidateWhenError(t *testing.T) { ctx := context.Background() // Cache 1 - cache1 := mocksCache.NewMockSetterCacheInterface[any](ctrl) + cache1 := NewMockSetterCacheInterface[any](ctrl) cache1.EXPECT().Invalidate(ctx).Return(errors.New("an unexpected error has occurred while invalidation data")) // Cache 2 - cache2 := mocksCache.NewMockSetterCacheInterface[any](ctrl) + cache2 := NewMockSetterCacheInterface[any](ctrl) cache2.EXPECT().Invalidate(ctx).Return(nil) cache := NewChain[any](cache1, cache2) @@ -350,11 +348,11 @@ func TestChainClear(t *testing.T) { ctx := context.Background() // Cache 1 - cache1 := mocksCache.NewMockSetterCacheInterface[any](ctrl) + cache1 := NewMockSetterCacheInterface[any](ctrl) cache1.EXPECT().Clear(ctx).Return(nil) // Cache 2 - cache2 := mocksCache.NewMockSetterCacheInterface[any](ctrl) + cache2 := NewMockSetterCacheInterface[any](ctrl) cache2.EXPECT().Clear(ctx).Return(nil) cache := NewChain[any](cache1, cache2) @@ -373,11 +371,11 @@ func TestChainClearWhenError(t *testing.T) { ctx := context.Background() // Cache 1 - cache1 := mocksCache.NewMockSetterCacheInterface[any](ctrl) + cache1 := NewMockSetterCacheInterface[any](ctrl) cache1.EXPECT().Clear(ctx).Return(errors.New("an unexpected error has occurred while invalidation data")) // Cache 2 - cache2 := mocksCache.NewMockSetterCacheInterface[any](ctrl) + cache2 := NewMockSetterCacheInterface[any](ctrl) cache2.EXPECT().Clear(ctx).Return(nil) cache := NewChain[any](cache1, cache2) @@ -393,7 +391,7 @@ func TestChainGetType(t *testing.T) { // Given ctrl := gomock.NewController(t) - cache1 := mocksCache.NewMockSetterCacheInterface[any](ctrl) + cache1 := NewMockSetterCacheInterface[any](ctrl) cache := NewChain[any](cache1) @@ -424,10 +422,10 @@ func TestCacheChecksum(t *testing.T) { func TestChainSetWhenErrorInChain(t *testing.T) { // Given ctrl := gomock.NewController(t) - store1 := mocksCache.NewMockSetterCacheInterface[any](ctrl) + store1 := NewMockSetterCacheInterface[any](ctrl) store1.EXPECT().GetType().AnyTimes().Return("store1") - codec1 := mocksCodec.NewMockCodecInterface(ctrl) + codec1 := codec.NewMockCodecInterface(ctrl) codec1.EXPECT().GetStore().AnyTimes().Return(store1) store1.EXPECT().GetCodec().AnyTimes().Return(codec1) @@ -439,7 +437,7 @@ func TestChainSetWhenErrorInChain(t *testing.T) { return interError }) - store2 := mocksCache.NewMockSetterCacheInterface[any](ctrl) + store2 := NewMockSetterCacheInterface[any](ctrl) cache := NewChain[any](store1, store2) diff --git a/cache/interface.go b/lib/cache/interface.go similarity index 93% rename from cache/interface.go rename to lib/cache/interface.go index 0608bff..f0aec24 100644 --- a/cache/interface.go +++ b/lib/cache/interface.go @@ -4,8 +4,8 @@ import ( "context" "time" - "github.com/eko/gocache/v3/codec" - "github.com/eko/gocache/v3/store" + "github.com/eko/gocache/v4/lib/codec" + "github.com/eko/gocache/v4/lib/store" ) // CacheInterface represents the interface for all caches (aggregates, metric, memory, redis, ...) 
diff --git a/cache/loadable.go b/lib/cache/loadable.go similarity index 98% rename from cache/loadable.go rename to lib/cache/loadable.go index abe7c5f..a9fceac 100644 --- a/cache/loadable.go +++ b/lib/cache/loadable.go @@ -4,7 +4,7 @@ import ( "context" "sync" - "github.com/eko/gocache/v3/store" + "github.com/eko/gocache/v4/lib/store" ) const ( diff --git a/cache/loadable_test.go b/lib/cache/loadable_test.go similarity index 78% rename from cache/loadable_test.go rename to lib/cache/loadable_test.go index 8433d06..9e0e487 100644 --- a/cache/loadable_test.go +++ b/lib/cache/loadable_test.go @@ -6,10 +6,7 @@ import ( "testing" "time" - "github.com/eko/gocache/v3/store" - mocksCache "github.com/eko/gocache/v3/test/mocks/cache" "github.com/golang/mock/gomock" - gocache "github.com/patrickmn/go-cache" "github.com/stretchr/testify/assert" ) @@ -17,7 +14,7 @@ func TestNewLoadable(t *testing.T) { // Given ctrl := gomock.NewController(t) - cache1 := mocksCache.NewMockSetterCacheInterface[any](ctrl) + cache1 := NewMockSetterCacheInterface[any](ctrl) loadFunc := func(_ context.Context, key any) (any, error) { return "test data loaded", nil @@ -45,7 +42,7 @@ func TestLoadableGetWhenAlreadyInCache(t *testing.T) { Hello: "world", } - cache1 := mocksCache.NewMockSetterCacheInterface[any](ctrl) + cache1 := NewMockSetterCacheInterface[any](ctrl) cache1.EXPECT().Get(ctx, "my-key").Return(cacheValue, nil) loadFunc := func(_ context.Context, key any) (any, error) { @@ -69,7 +66,7 @@ func TestLoadableGetWhenNotAvailableInLoadFunc(t *testing.T) { ctx := context.Background() // Cache - cache1 := mocksCache.NewMockSetterCacheInterface[any](ctrl) + cache1 := NewMockSetterCacheInterface[any](ctrl) cache1.EXPECT().Get(ctx, "my-key").Return(nil, errors.New("unable to find in cache 1")) loadFunc := func(_ context.Context, key any) (any, error) { @@ -99,7 +96,7 @@ func TestLoadableGetWhenAvailableInLoadFunc(t *testing.T) { } // Cache 1 - cache1 := mocksCache.NewMockSetterCacheInterface[any](ctrl) + cache1 := NewMockSetterCacheInterface[any](ctrl) cache1.EXPECT().Get(ctx, "my-key").Return(nil, errors.New("unable to find in cache 1")) cache1.EXPECT().Set(ctx, "my-key", cacheValue).AnyTimes().Return(nil) @@ -128,7 +125,7 @@ func TestLoadableDelete(t *testing.T) { ctx := context.Background() - cache1 := mocksCache.NewMockSetterCacheInterface[any](ctrl) + cache1 := NewMockSetterCacheInterface[any](ctrl) cache1.EXPECT().Delete(ctx, "my-key").Return(nil) loadFunc := func(_ context.Context, key any) (any, error) { @@ -152,7 +149,7 @@ func TestLoadableDeleteWhenError(t *testing.T) { expectedErr := errors.New("unable to delete key") - cache1 := mocksCache.NewMockSetterCacheInterface[any](ctrl) + cache1 := NewMockSetterCacheInterface[any](ctrl) cache1.EXPECT().Delete(ctx, "my-key").Return(expectedErr) loadFunc := func(_ context.Context, key any) (any, error) { @@ -174,7 +171,7 @@ func TestLoadableInvalidate(t *testing.T) { ctx := context.Background() - cache1 := mocksCache.NewMockSetterCacheInterface[any](ctrl) + cache1 := NewMockSetterCacheInterface[any](ctrl) cache1.EXPECT().Invalidate(ctx).Return(nil) loadFunc := func(_ context.Context, key any) (any, error) { @@ -198,7 +195,7 @@ func TestLoadableInvalidateWhenError(t *testing.T) { expectedErr := errors.New("unexpected error when invalidating data") - cache1 := mocksCache.NewMockSetterCacheInterface[any](ctrl) + cache1 := NewMockSetterCacheInterface[any](ctrl) cache1.EXPECT().Invalidate(ctx).Return(expectedErr) loadFunc := func(_ context.Context, key any) (any, error) { @@ 
-220,7 +217,7 @@ func TestLoadableClear(t *testing.T) { ctx := context.Background() - cache1 := mocksCache.NewMockSetterCacheInterface[any](ctrl) + cache1 := NewMockSetterCacheInterface[any](ctrl) cache1.EXPECT().Clear(ctx).Return(nil) loadFunc := func(_ context.Context, key any) (any, error) { @@ -244,7 +241,7 @@ func TestLoadableClearWhenError(t *testing.T) { expectedErr := errors.New("unexpected error when invalidating data") - cache1 := mocksCache.NewMockSetterCacheInterface[any](ctrl) + cache1 := NewMockSetterCacheInterface[any](ctrl) cache1.EXPECT().Clear(ctx).Return(expectedErr) loadFunc := func(_ context.Context, key any) (any, error) { @@ -264,7 +261,7 @@ func TestLoadableGetType(t *testing.T) { // Given ctrl := gomock.NewController(t) - cache1 := mocksCache.NewMockSetterCacheInterface[any](ctrl) + cache1 := NewMockSetterCacheInterface[any](ctrl) loadFunc := func(_ context.Context, key any) (any, error) { return "test data loaded", nil @@ -275,27 +272,3 @@ func TestLoadableGetType(t *testing.T) { // When - Then assert.Equal(t, LoadableType, cache.GetType()) } - -func TestLoadableGocache(t *testing.T) { - gocacheClient := gocache.New(5*time.Second, 5*time.Second) - gocacheStore := store.NewGoCache(gocacheClient, store.WithExpiration(5*time.Second)) - - cacheValue := "my-value" - loadFunc := func(ctx context.Context, accountID any) (string, error) { - return cacheValue, nil - } - - cache := NewLoadable[string](loadFunc, New[string](gocacheStore)) - - // When - value, err := cache.Get(context.Background(), "my-key") - - // Wait for data to be processed - for len(cache.setChannel) > 0 { - time.Sleep(1 * time.Millisecond) - } - - // Then - assert.Nil(t, err) - assert.Equal(t, cacheValue, value) -} diff --git a/cache/metric.go b/lib/cache/metric.go similarity index 95% rename from cache/metric.go rename to lib/cache/metric.go index f2ab63e..8319f64 100644 --- a/cache/metric.go +++ b/lib/cache/metric.go @@ -3,8 +3,8 @@ package cache import ( "context" - "github.com/eko/gocache/v3/metrics" - "github.com/eko/gocache/v3/store" + "github.com/eko/gocache/v4/lib/metrics" + "github.com/eko/gocache/v4/lib/store" ) const ( diff --git a/cache/metric_test.go b/lib/cache/metric_test.go similarity index 71% rename from cache/metric_test.go rename to lib/cache/metric_test.go index a942257..0ae867c 100644 --- a/cache/metric_test.go +++ b/lib/cache/metric_test.go @@ -6,10 +6,9 @@ import ( "testing" "time" - mocksCache "github.com/eko/gocache/v3/test/mocks/cache" - mocksCodec "github.com/eko/gocache/v3/test/mocks/codec" - mocksMetrics "github.com/eko/gocache/v3/test/mocks/metrics" - mocksStore "github.com/eko/gocache/v3/test/mocks/store" + "github.com/eko/gocache/v4/lib/codec" + "github.com/eko/gocache/v4/lib/metrics" + "github.com/eko/gocache/v4/lib/store" "github.com/golang/mock/gomock" "github.com/stretchr/testify/assert" ) @@ -18,8 +17,8 @@ func TestNewMetric(t *testing.T) { // Given ctrl := gomock.NewController(t) - cache1 := mocksCache.NewMockSetterCacheInterface[any](ctrl) - metrics := mocksMetrics.NewMockMetricsInterface(ctrl) + cache1 := NewMockSetterCacheInterface[any](ctrl) + metrics := metrics.NewMockMetricsInterface(ctrl) // When cache := NewMetric[any](metrics, cache1) @@ -43,12 +42,12 @@ func TestMetricGet(t *testing.T) { Hello: "world", } - codec1 := mocksCodec.NewMockCodecInterface(ctrl) - cache1 := mocksCache.NewMockSetterCacheInterface[any](ctrl) + codec1 := codec.NewMockCodecInterface(ctrl) + cache1 := NewMockSetterCacheInterface[any](ctrl) cache1.EXPECT().Get(ctx, 
"my-key").Return(cacheValue, nil) cache1.EXPECT().GetCodec().Return(codec1) - metrics := mocksMetrics.NewMockMetricsInterface(ctrl) + metrics := metrics.NewMockMetricsInterface(ctrl) metrics.EXPECT().RecordFromCodec(codec1).AnyTimes() cache := NewMetric[any](metrics, cache1) @@ -73,20 +72,20 @@ func TestMetricGetWhenChainCache(t *testing.T) { Hello: "world", } - store1 := mocksStore.NewMockStoreInterface(ctrl) + store1 := store.NewMockStoreInterface(ctrl) store1.EXPECT().GetType().AnyTimes().Return("store1") - codec1 := mocksCodec.NewMockCodecInterface(ctrl) + codec1 := codec.NewMockCodecInterface(ctrl) codec1.EXPECT().GetStore().AnyTimes().Return(store1) - cache1 := mocksCache.NewMockSetterCacheInterface[any](ctrl) + cache1 := NewMockSetterCacheInterface[any](ctrl) cache1.EXPECT().GetWithTTL(ctx, "my-key").Return(cacheValue, 0*time.Second, nil) cache1.EXPECT().GetCodec().AnyTimes().Return(codec1) chainCache := NewChain[any](cache1) - metrics := mocksMetrics.NewMockMetricsInterface(ctrl) + metrics := metrics.NewMockMetricsInterface(ctrl) metrics.EXPECT().RecordFromCodec(codec1).AnyTimes() cache := NewMetric[any](metrics, chainCache) @@ -111,10 +110,10 @@ func TestMetricSet(t *testing.T) { Hello: "world", } - cache1 := mocksCache.NewMockSetterCacheInterface[any](ctrl) + cache1 := NewMockSetterCacheInterface[any](ctrl) cache1.EXPECT().Set(ctx, "my-key", value).Return(nil) - metrics := mocksMetrics.NewMockMetricsInterface(ctrl) + metrics := metrics.NewMockMetricsInterface(ctrl) cache := NewMetric[any](metrics, cache1) @@ -131,10 +130,10 @@ func TestMetricDelete(t *testing.T) { ctx := context.Background() - cache1 := mocksCache.NewMockSetterCacheInterface[any](ctrl) + cache1 := NewMockSetterCacheInterface[any](ctrl) cache1.EXPECT().Delete(ctx, "my-key").Return(nil) - metrics := mocksMetrics.NewMockMetricsInterface(ctrl) + metrics := metrics.NewMockMetricsInterface(ctrl) cache := NewMetric[any](metrics, cache1) @@ -153,10 +152,10 @@ func TestMetricDeleteWhenError(t *testing.T) { expectedErr := errors.New("unable to delete key") - cache1 := mocksCache.NewMockSetterCacheInterface[any](ctrl) + cache1 := NewMockSetterCacheInterface[any](ctrl) cache1.EXPECT().Delete(ctx, "my-key").Return(expectedErr) - metrics := mocksMetrics.NewMockMetricsInterface(ctrl) + metrics := metrics.NewMockMetricsInterface(ctrl) cache := NewMetric[any](metrics, cache1) @@ -173,10 +172,10 @@ func TestMetricInvalidate(t *testing.T) { ctx := context.Background() - cache1 := mocksCache.NewMockSetterCacheInterface[any](ctrl) + cache1 := NewMockSetterCacheInterface[any](ctrl) cache1.EXPECT().Invalidate(ctx).Return(nil) - metrics := mocksMetrics.NewMockMetricsInterface(ctrl) + metrics := metrics.NewMockMetricsInterface(ctrl) cache := NewMetric[any](metrics, cache1) @@ -195,10 +194,10 @@ func TestMetricInvalidateWhenError(t *testing.T) { expectedErr := errors.New("unexpected error while invalidating data") - cache1 := mocksCache.NewMockSetterCacheInterface[any](ctrl) + cache1 := NewMockSetterCacheInterface[any](ctrl) cache1.EXPECT().Invalidate(ctx).Return(expectedErr) - metrics := mocksMetrics.NewMockMetricsInterface(ctrl) + metrics := metrics.NewMockMetricsInterface(ctrl) cache := NewMetric[any](metrics, cache1) @@ -215,10 +214,10 @@ func TestMetricClear(t *testing.T) { ctx := context.Background() - cache1 := mocksCache.NewMockSetterCacheInterface[any](ctrl) + cache1 := NewMockSetterCacheInterface[any](ctrl) cache1.EXPECT().Clear(ctx).Return(nil) - metrics := mocksMetrics.NewMockMetricsInterface(ctrl) + metrics := 
metrics.NewMockMetricsInterface(ctrl) cache := NewMetric[any](metrics, cache1) @@ -237,10 +236,10 @@ func TestMetricClearWhenError(t *testing.T) { expectedErr := errors.New("unexpected error while clearing cache") - cache1 := mocksCache.NewMockSetterCacheInterface[any](ctrl) + cache1 := NewMockSetterCacheInterface[any](ctrl) cache1.EXPECT().Clear(ctx).Return(expectedErr) - metrics := mocksMetrics.NewMockMetricsInterface(ctrl) + metrics := metrics.NewMockMetricsInterface(ctrl) cache := NewMetric[any](metrics, cache1) @@ -255,8 +254,8 @@ func TestMetricGetType(t *testing.T) { // Given ctrl := gomock.NewController(t) - cache1 := mocksCache.NewMockSetterCacheInterface[any](ctrl) - metrics := mocksMetrics.NewMockMetricsInterface(ctrl) + cache1 := NewMockSetterCacheInterface[any](ctrl) + metrics := metrics.NewMockMetricsInterface(ctrl) cache := NewMetric[any](metrics, cache1) diff --git a/codec/codec.go b/lib/codec/codec.go similarity index 98% rename from codec/codec.go rename to lib/codec/codec.go index 05a9c9b..cde92cf 100644 --- a/codec/codec.go +++ b/lib/codec/codec.go @@ -5,7 +5,7 @@ import ( "sync" "time" - "github.com/eko/gocache/v3/store" + "github.com/eko/gocache/v4/lib/store" ) // Stats allows to returns some statistics of codec usage diff --git a/test/mocks/codec/codec_interface.go b/lib/codec/codec_mock.go similarity index 96% rename from test/mocks/codec/codec_interface.go rename to lib/codec/codec_mock.go index c3df4f6..914af2c 100644 --- a/test/mocks/codec/codec_interface.go +++ b/lib/codec/codec_mock.go @@ -2,15 +2,14 @@ // Source: codec/interface.go // Package mocks is a generated GoMock package. -package mocks +package codec import ( context "context" reflect "reflect" time "time" - codec "github.com/eko/gocache/v3/codec" - store "github.com/eko/gocache/v3/store" + store "github.com/eko/gocache/v4/lib/store" gomock "github.com/golang/mock/gomock" ) @@ -81,10 +80,10 @@ func (mr *MockCodecInterfaceMockRecorder) Get(ctx, key interface{}) *gomock.Call } // GetStats mocks base method. 
-func (m *MockCodecInterface) GetStats() *codec.Stats { +func (m *MockCodecInterface) GetStats() *Stats { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetStats") - ret0, _ := ret[0].(*codec.Stats) + ret0, _ := ret[0].(*Stats) return ret0 } diff --git a/codec/codec_test.go b/lib/codec/codec_test.go similarity index 92% rename from codec/codec_test.go rename to lib/codec/codec_test.go index 6b87da2..2c39086 100644 --- a/codec/codec_test.go +++ b/lib/codec/codec_test.go @@ -6,8 +6,7 @@ import ( "testing" "time" - "github.com/eko/gocache/v3/store" - mocksStore "github.com/eko/gocache/v3/test/mocks/store" + "github.com/eko/gocache/v4/lib/store" "github.com/golang/mock/gomock" "github.com/stretchr/testify/assert" ) @@ -16,7 +15,7 @@ func TestNew(t *testing.T) { // Given ctrl := gomock.NewController(t) - store := mocksStore.NewMockStoreInterface(ctrl) + store := store.NewMockStoreInterface(ctrl) // When codec := New(store) @@ -37,7 +36,7 @@ func TestGetWhenHit(t *testing.T) { Hello: "world", } - store := mocksStore.NewMockStoreInterface(ctrl) + store := store.NewMockStoreInterface(ctrl) store.EXPECT().Get(ctx, "my-key").Return(cacheValue, nil) codec := New(store) @@ -73,7 +72,7 @@ func TestGetWithTTLWhenHit(t *testing.T) { Hello: "world", } - store := mocksStore.NewMockStoreInterface(ctrl) + store := store.NewMockStoreInterface(ctrl) store.EXPECT().GetWithTTL(ctx, "my-key").Return(cacheValue, 1*time.Second, nil) codec := New(store) @@ -106,7 +105,7 @@ func TestGetWithTTLWhenMiss(t *testing.T) { expectedErr := errors.New("unable to find in store") - store := mocksStore.NewMockStoreInterface(ctrl) + store := store.NewMockStoreInterface(ctrl) store.EXPECT().GetWithTTL(ctx, "my-key").Return(nil, 0*time.Second, expectedErr) codec := New(store) @@ -139,7 +138,7 @@ func TestGetWhenMiss(t *testing.T) { expectedErr := errors.New("unable to find in store") - store := mocksStore.NewMockStoreInterface(ctrl) + store := store.NewMockStoreInterface(ctrl) store.EXPECT().Get(ctx, "my-key").Return(nil, expectedErr) codec := New(store) @@ -175,7 +174,7 @@ func TestSetWhenSuccess(t *testing.T) { Hello: "world", } - mockedStore := mocksStore.NewMockStoreInterface(ctrl) + mockedStore := store.NewMockStoreInterface(ctrl) mockedStore.EXPECT().Set(ctx, "my-key", cacheValue, store.OptionsMatcher{ Expiration: 5 * time.Second, }).Return(nil) @@ -214,7 +213,7 @@ func TestSetWhenError(t *testing.T) { expectedErr := errors.New("unable to set value in store") - mockedStore := mocksStore.NewMockStoreInterface(ctrl) + mockedStore := store.NewMockStoreInterface(ctrl) mockedStore.EXPECT().Set(ctx, "my-key", cacheValue, store.OptionsMatcher{ Expiration: 5 * time.Second, }).Return(expectedErr) @@ -245,7 +244,7 @@ func TestDeleteWhenSuccess(t *testing.T) { ctx := context.Background() - store := mocksStore.NewMockStoreInterface(ctrl) + store := store.NewMockStoreInterface(ctrl) store.EXPECT().Delete(ctx, "my-key").Return(nil) codec := New(store) @@ -276,7 +275,7 @@ func TesDeleteWhenError(t *testing.T) { expectedErr := errors.New("unable to delete key") - store := mocksStore.NewMockStoreInterface(ctrl) + store := store.NewMockStoreInterface(ctrl) store.EXPECT().Delete(ctx, "my-key").Return(expectedErr) codec := New(store) @@ -305,7 +304,7 @@ func TestInvalidateWhenSuccess(t *testing.T) { ctx := context.Background() - mockedStore := mocksStore.NewMockStoreInterface(ctrl) + mockedStore := store.NewMockStoreInterface(ctrl) mockedStore.EXPECT().Invalidate(ctx, store.InvalidateOptionsMatcher{ Tags: []string{"tag1"}, }).Return(nil) @@ -338,7 
+337,7 @@ func TestInvalidateWhenError(t *testing.T) { expectedErr := errors.New("unexpected error when invalidating data") - mockedStore := mocksStore.NewMockStoreInterface(ctrl) + mockedStore := store.NewMockStoreInterface(ctrl) mockedStore.EXPECT().Invalidate(ctx, store.InvalidateOptionsMatcher{ Tags: []string{"tag1"}, }).Return(expectedErr) @@ -369,7 +368,7 @@ func TestClearWhenSuccess(t *testing.T) { ctx := context.Background() - store := mocksStore.NewMockStoreInterface(ctrl) + store := store.NewMockStoreInterface(ctrl) store.EXPECT().Clear(ctx).Return(nil) codec := New(store) @@ -400,7 +399,7 @@ func TestClearWhenError(t *testing.T) { expectedErr := errors.New("unexpected error when clearing cache") - store := mocksStore.NewMockStoreInterface(ctrl) + store := store.NewMockStoreInterface(ctrl) store.EXPECT().Clear(ctx).Return(expectedErr) codec := New(store) @@ -427,7 +426,7 @@ func TestGetStore(t *testing.T) { // Given ctrl := gomock.NewController(t) - store := mocksStore.NewMockStoreInterface(ctrl) + store := store.NewMockStoreInterface(ctrl) codec := New(store) @@ -439,7 +438,7 @@ func TestGetStats(t *testing.T) { // Given ctrl := gomock.NewController(t) - store := mocksStore.NewMockStoreInterface(ctrl) + store := store.NewMockStoreInterface(ctrl) codec := New(store) diff --git a/codec/interface.go b/lib/codec/interface.go similarity index 93% rename from codec/interface.go rename to lib/codec/interface.go index 66edc60..fb6f5df 100644 --- a/codec/interface.go +++ b/lib/codec/interface.go @@ -4,7 +4,7 @@ import ( "context" "time" - "github.com/eko/gocache/v3/store" + "github.com/eko/gocache/v4/lib/store" ) // CodecInterface represents an instance of a cache codec diff --git a/lib/go.mod b/lib/go.mod new file mode 100644 index 0000000..8ba72e2 --- /dev/null +++ b/lib/go.mod @@ -0,0 +1,28 @@ +module github.com/eko/gocache/v4/lib + +go 1.19 + +require ( + github.com/golang/mock v1.6.0 + github.com/prometheus/client_golang v1.14.0 + github.com/stretchr/testify v1.8.1 + github.com/vmihailenco/msgpack v4.0.4+incompatible + golang.org/x/exp v0.0.0-20221126150942-6ab00d035af9 +) + +require ( + github.com/beorn7/perks v1.0.1 // indirect + github.com/cespare/xxhash/v2 v2.1.2 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/golang/protobuf v1.5.2 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/prometheus/client_model v0.3.0 // indirect + github.com/prometheus/common v0.37.0 // indirect + github.com/prometheus/procfs v0.8.0 // indirect + golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect + golang.org/x/sys v0.1.0 // indirect + google.golang.org/appengine v1.6.6 // indirect + google.golang.org/protobuf v1.28.1 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect +) diff --git a/go.sum b/lib/go.sum similarity index 78% rename from go.sum rename to lib/go.sum index 219ac43..a2d6c83 100644 --- a/go.sum +++ b/lib/go.sum @@ -31,30 +31,17 @@ cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohl cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod 
h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= -github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/XiaoMi/pegasus-go-client v0.0.0-20210427083443-f3b6b08bc4c2 h1:pami0oPhVosjOu/qRHepRmdjD6hGILF7DBr+qQZeP10= -github.com/XiaoMi/pegasus-go-client v0.0.0-20210427083443-f3b6b08bc4c2/go.mod h1:jNIx5ykW1MroBuaTja9+VpglmaJOUzezumfhLlER3oY= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= -github.com/allegro/bigcache/v3 v3.1.0 h1:H2Vp8VOvxcrB91o86fUSVJFqeuz8kpyyB02eH3bSzwk= -github.com/allegro/bigcache/v3 v3.1.0/go.mod h1:aPyh7jEvrog9zAwx5N7+JUQX5dZTSGpxF1LAR4dr35I= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bradfitz/gomemcache v0.0.0-20221031212613-62deef7fc822 h1:hjXJeBcAMS1WGENGqDpzvmgS43oECTx8UXq31UBu0Jw= -github.com/bradfitz/gomemcache v0.0.0-20221031212613-62deef7fc822/go.mod h1:H0wQNHz2YrLsuXOZozoeDmnHXkNCRmMW0gwFWDfEZDA= -github.com/cenkalti/backoff/v4 v4.1.0/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= -github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= -github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= @@ -64,34 +51,13 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5P github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/coocood/freecache v1.2.3 h1:lcBwpZrwBZRZyLk/8EMyQVXRiFl663cCuMOrjCALeto= -github.com/coocood/freecache v1.2.3/go.mod h1:RBUWa/Cy+OHdfTGFEhEuE1pMCMX51Ncizj7rthiQ3vk= -github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 
h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dgraph-io/ristretto v0.1.1 h1:6CWw5tJNgpegArSHpNHJKldNeq03FQCwYvfMVWajOK8= -github.com/dgraph-io/ristretto v0.1.1/go.mod h1:S1GPSBCYCIhmVNfcth17y2zZtQT6wzkzgwUve0VDWWA= -github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA= -github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= -github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= -github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= -github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= -github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= -github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= -github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= -github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= -github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= -github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= @@ -103,20 +69,9 @@ github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9 github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= -github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= -github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= -github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= -github.com/go-openapi/spec 
v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= -github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= -github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI= -github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ= -github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= -github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -129,7 +84,6 @@ github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= -github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -159,7 +113,6 @@ github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= -github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= @@ -171,72 +124,41 @@ github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 
v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gopherjs/gopherjs v1.17.2 h1:fQnZVsXk8uxXIStYb0N4bGk7jeyTalG/wsZjQ25dO0g= -github.com/gopherjs/gopherjs v1.17.2/go.mod h1:pRRIvn/QzFLrKfvEz3qUuEhtE/zLCWfreZ6J5gM2i+k= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= -github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= -github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= -github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= -github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= -github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= 
-github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= -github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= -github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= -github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= -github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= -github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.20.1 h1:PA/3qinGoukvymdIDV8pii6tiZgC8kbmJO6Z5+b002Q= -github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc= -github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= -github.com/pegasus-kv/thrift v0.13.0 h1:4ESwaNoHImfbHa9RUGJiJZ4hrxorihZHk5aarYwY8d4= -github.com/pegasus-kv/thrift v0.13.0/go.mod h1:Gl9NT/WHG6ABm6NsrbfE8LiJN0sAyneCrvB4qN4NPqQ= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= @@ -266,30 +188,16 @@ github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1 github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo= github.com/prometheus/procfs v0.8.0/go.mod 
h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= -github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= -github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/smartystreets/assertions v1.2.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo= -github.com/smartystreets/assertions v1.13.0 h1:Dx1kYM01xsSqKPno3aqLnrwac2LetPvN23diwyr69Qs= -github.com/smartystreets/assertions v1.13.0/go.mod h1:wDmR7qL282YbGsPy6H/yAsesrxfxaaSlJazyFLYVFx8= -github.com/smartystreets/goconvey v1.7.2 h1:9RBaZCeXEQ3UselpuwUQHltGVXvdwm6cv1hgR6gDIPg= -github.com/smartystreets/goconvey v1.7.2/go.mod h1:Vw0tHAZW6lzCRk3xgdin6fKYcG+G3Pg9vgXWeJpQFMM= -github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= -github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= -github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= @@ -321,8 +229,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20221110155412-d0897a79cd37 h1:wKMvZzBFHbOCGvF2OmxR5Fqv/jDlkt7slnPz5ejEU8A= -golang.org/x/exp v0.0.0-20221110155412-d0897a79cd37/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= +golang.org/x/exp v0.0.0-20221126150942-6ab00d035af9 h1:yZNXmy+j/JpX19vZkVktWqAo7Gny4PBWYYK3zskGpx4= +golang.org/x/exp v0.0.0-20221126150942-6ab00d035af9/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= 
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -344,10 +252,8 @@ golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -360,8 +266,6 @@ golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191105084925-a882066a44e0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -379,9 +283,8 @@ golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.2.0 h1:sZfSu1wtKLGlWI4ZZayP0ck9Y73K1ynO6gqzTdBVdPU= -golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -399,12 +302,8 @@ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= -golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -415,9 +314,7 @@ golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -445,13 +342,10 @@ golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20221010170243-090e33056c14/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.2.0 h1:ljd4t30dBnAvMZaQCevtY0xLLD0A+bRZXbgLMLU1F/A= -golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0 h1:kunALQeHf1/185U1i0GOB/fy1IPRDDpuoOOqRReG57U= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text 
v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -459,19 +353,15 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.4.0 h1:BrVqGRd7+k1DiOgtnFvAkoQEWQvBc25ouMJM6429SFg= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= @@ -532,9 +422,8 @@ google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6 h1:lMO5rYAqUxkmaj76jAkRUvt5JZgFymx/+Q5Mzfivuhc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -593,23 +482,14 @@ google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqw gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod 
h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8= -gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/tomb.v2 v2.0.0-20161208151619-d5d1b5820637 h1:yiW+nvdHb9LVqSHQBXfZCieqV4fzYhNBql77zY0ykqs= -gopkg.in/tomb.v2 v2.0.0-20161208151619-d5d1b5820637/go.mod h1:BHsqpu/nsuzkT5BpiH1EMZPLyqSMM8JbIavyFACoFNk= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= @@ -621,15 +501,6 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/apimachinery v0.0.0-20191123233150-4c4803ed55e3/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg= -k8s.io/apimachinery v0.25.3 h1:7o9ium4uyUOM76t6aunP0nZuex7gDf8VGwkR5RcJnQc= -k8s.io/apimachinery v0.25.3/go.mod h1:jaF9C/iPNM1FuLl7Zuy5b9v+n35HGSh6AQ4HYRkCqwo= -k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= -k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= -k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI= 
-sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
diff --git a/marshaler/marshaler.go b/lib/marshaler/marshaler.go
similarity index 95%
rename from marshaler/marshaler.go
rename to lib/marshaler/marshaler.go
index 9e8a411..422195b 100644
--- a/marshaler/marshaler.go
+++ b/lib/marshaler/marshaler.go
@@ -3,8 +3,8 @@ package marshaler
import (
"context"
- "github.com/eko/gocache/v3/cache"
- "github.com/eko/gocache/v3/store"
+ "github.com/eko/gocache/v4/lib/cache"
+ "github.com/eko/gocache/v4/lib/store"
"github.com/vmihailenco/msgpack"
)
diff --git a/marshaler/marshaler_test.go b/lib/marshaler/marshaler_test.go
similarity index 87%
rename from marshaler/marshaler_test.go
rename to lib/marshaler/marshaler_test.go
index 9d3b66d..7556b79 100644
--- a/marshaler/marshaler_test.go
+++ b/lib/marshaler/marshaler_test.go
@@ -6,8 +6,8 @@ import (
"testing"
"time"
- "github.com/eko/gocache/v3/store"
- mocksCache "github.com/eko/gocache/v3/test/mocks/cache"
+ "github.com/eko/gocache/v4/lib/cache"
+ "github.com/eko/gocache/v4/lib/store"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
"github.com/vmihailenco/msgpack"
@@ -21,7 +21,7 @@ func TestNew(t *testing.T) {
// Given
ctrl := gomock.NewController(t)
- cache := mocksCache.NewMockCacheInterface[any](ctrl)
+ cache := cache.NewMockCacheInterface[any](ctrl)
// When
marshaler := New(cache)
@@ -46,7 +46,7 @@ func TestGetWhenStoreReturnsSliceOfBytes(t *testing.T) {
assert.Error(t, err)
}
- cache := mocksCache.NewMockCacheInterface[any](ctrl)
+ cache := cache.NewMockCacheInterface[any](ctrl)
cache.EXPECT().Get(ctx, "my-key").Return(cacheValueBytes, nil)
marshaler := New(cache)
@@ -74,7 +74,7 @@ func TestGetWhenStoreReturnsString(t *testing.T) {
assert.Error(t, err)
}
- cache := mocksCache.NewMockCacheInterface[any](ctrl)
+ cache := cache.NewMockCacheInterface[any](ctrl)
cache.EXPECT().Get(ctx, "my-key").Return(string(cacheValueBytes), nil)
marshaler := New(cache)
@@ -93,7 +93,7 @@ func TestGetWhenUnmarshalingError(t *testing.T) {
ctx := context.Background()
- cache := mocksCache.NewMockCacheInterface[any](ctrl)
+ cache := cache.NewMockCacheInterface[any](ctrl)
cache.EXPECT().Get(ctx, "my-key").Return("unknown-string", nil)
marshaler := New(cache)
@@ -114,7 +114,7 @@ func TestGetWhenNotFoundInStore(t *testing.T) {
expectedErr := errors.New("unable to find item in store")
- cache := mocksCache.NewMockCacheInterface[any](ctrl)
+ cache := cache.NewMockCacheInterface[any](ctrl)
cache.EXPECT().Get(ctx, "my-key").Return(nil, expectedErr)
marshaler := New(cache)
@@ -137,7 +137,7 @@ func TestSetWhenStruct(t *testing.T) {
Hello: "world",
}
- cache := mocksCache.NewMockCacheInterface[any](ctrl)
+ cache := cache.NewMockCacheInterface[any](ctrl)
cache.EXPECT().Set(
ctx,
"my-key",
@@ -164,7 +164,7 @@ func TestSetWhenString(t *testing.T) {
cacheValue := "test"
- cache := mocksCache.NewMockCacheInterface[any](ctrl)
+ cache := cache.NewMockCacheInterface[any](ctrl)
cache.EXPECT().Set(
ctx,
"my-key",
@@ -193,7 +193,7 @@ func TestSetWhenError(t *testing.T) {
expectedErr := errors.New("an unexpected error occurred")
- cache := mocksCache.NewMockCacheInterface[any](ctrl)
+ cache := cache.NewMockCacheInterface[any](ctrl)
cache.EXPECT().Set(
ctx,
"my-key",
@@ -216,7 +216,7 @@ func TestDelete(t *testing.T) {
ctx := context.Background()
- cache := mocksCache.NewMockCacheInterface[any](ctrl)
+ cache := cache.NewMockCacheInterface[any](ctrl)
cache.EXPECT().Delete(ctx, "my-key").Return(nil)
marshaler := New(cache)
@@ -236,7 +236,7 @@ func TestDeleteWhenError(t *testing.T) {
expectedErr := errors.New("unable to delete key")
- cache := mocksCache.NewMockCacheInterface[any](ctrl)
+ cache := cache.NewMockCacheInterface[any](ctrl)
cache.EXPECT().Delete(ctx, "my-key").Return(expectedErr)
marshaler := New(cache)
@@ -254,7 +254,7 @@ func TestInvalidate(t *testing.T) {
ctx := context.Background()
- cache := mocksCache.NewMockCacheInterface[any](ctrl)
+ cache := cache.NewMockCacheInterface[any](ctrl)
cache.EXPECT().Invalidate(ctx, store.InvalidateOptionsMatcher{
Tags: []string{"tag1"},
}).Return(nil)
@@ -276,7 +276,7 @@ func TestInvalidatingWhenError(t *testing.T) {
expectedErr := errors.New("unexpected error when invalidating data")
- cache := mocksCache.NewMockCacheInterface[any](ctrl)
+ cache := cache.NewMockCacheInterface[any](ctrl)
cache.EXPECT().Invalidate(ctx, store.InvalidateOptionsMatcher{Tags: []string{"tag1"}}).Return(expectedErr)
marshaler := New(cache)
@@ -294,7 +294,7 @@ func TestClear(t *testing.T) {
ctx := context.Background()
- cache := mocksCache.NewMockCacheInterface[any](ctrl)
+ cache := cache.NewMockCacheInterface[any](ctrl)
cache.EXPECT().Clear(ctx).Return(nil)
marshaler := New(cache)
@@ -314,7 +314,7 @@ func TestClearWhenError(t *testing.T) {
expectedErr := errors.New("an unexpected error occurred")
- cache := mocksCache.NewMockCacheInterface[any](ctrl)
+ cache := cache.NewMockCacheInterface[any](ctrl)
cache.EXPECT().Clear(ctx).Return(expectedErr)
marshaler := New(cache)
diff --git a/metrics/interface.go b/lib/metrics/interface.go
similarity index 80%
rename from metrics/interface.go
rename to lib/metrics/interface.go
index 7fb5008..2833c08 100644
--- a/metrics/interface.go
+++ b/lib/metrics/interface.go
@@ -1,6 +1,6 @@
package metrics
-import "github.com/eko/gocache/v3/codec"
+import "github.com/eko/gocache/v4/lib/codec"
// MetricsInterface represents the metrics interface for all available providers
type MetricsInterface interface {
diff --git a/test/mocks/metrics/metrics_interface.go b/lib/metrics/metrics_mock.go
similarity index 96%
rename from test/mocks/metrics/metrics_interface.go
rename to lib/metrics/metrics_mock.go
index a1535d7..97ab5d5 100644
--- a/test/mocks/metrics/metrics_interface.go
+++ b/lib/metrics/metrics_mock.go
@@ -2,12 +2,12 @@
// Source: metrics/interface.go
// Package mocks is a generated GoMock package.
-package mocks
+package metrics
import (
reflect "reflect"
- codec "github.com/eko/gocache/v3/codec"
+ codec "github.com/eko/gocache/v4/lib/codec"
gomock "github.com/golang/mock/gomock"
)
diff --git a/metrics/prometheus.go b/lib/metrics/prometheus.go
similarity index 98%
rename from metrics/prometheus.go
rename to lib/metrics/prometheus.go
index 7b818af..9713324 100644
--- a/metrics/prometheus.go
+++ b/lib/metrics/prometheus.go
@@ -1,7 +1,7 @@
package metrics
import (
- "github.com/eko/gocache/v3/codec"
+ "github.com/eko/gocache/v4/lib/codec"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
)
diff --git a/metrics/prometheus_test.go b/lib/metrics/prometheus_test.go
similarity index 91%
rename from metrics/prometheus_test.go
rename to lib/metrics/prometheus_test.go
index f15785b..a730544 100644
--- a/metrics/prometheus_test.go
+++ b/lib/metrics/prometheus_test.go
@@ -4,9 +4,8 @@ import (
"testing"
"time"
- "github.com/eko/gocache/v3/codec"
- mocksCodec "github.com/eko/gocache/v3/test/mocks/codec"
- mocksStore "github.com/eko/gocache/v3/test/mocks/store"
+ "github.com/eko/gocache/v4/lib/codec"
+ "github.com/eko/gocache/v4/lib/store"
"github.com/golang/mock/gomock"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/testutil"
@@ -48,7 +47,7 @@ func TestRecordFromCodec(t *testing.T) {
// Given
ctrl := gomock.NewController(t)
- redisStore := mocksStore.NewMockStoreInterface(ctrl)
+ redisStore := store.NewMockStoreInterface(ctrl)
redisStore.EXPECT().GetType().Return("redis")
stats := &codec.Stats{
@@ -62,7 +61,7 @@ func TestRecordFromCodec(t *testing.T) {
InvalidateError: 1,
}
- testCodec := mocksCodec.NewMockCodecInterface(ctrl)
+ testCodec := codec.NewMockCodecInterface(ctrl)
testCodec.EXPECT().GetStats().Return(stats)
testCodec.EXPECT().GetStore().Return(redisStore)
diff --git a/misc/benchmarks.jpeg b/lib/misc/benchmarks.jpeg
similarity index 100%
rename from misc/benchmarks.jpeg
rename to lib/misc/benchmarks.jpeg
diff --git a/store/errors.go b/lib/store/errors.go
similarity index 100%
rename from store/errors.go
rename to lib/store/errors.go
diff --git a/store/errors_test.go b/lib/store/errors_test.go
similarity index 65%
rename from store/errors_test.go
rename to lib/store/errors_test.go
index c7d06fb..9727acb 100644
--- a/store/errors_test.go
+++ b/lib/store/errors_test.go
@@ -4,14 +4,13 @@ import (
"errors"
"testing"
- "github.com/go-redis/redis/v8"
"github.com/stretchr/testify/assert"
)
func TestNotFoundIs(t *testing.T) {
- err := NotFoundWithCause(redis.Nil)
- assert.True(t, errors.Is(err, NotFound{}))
- assert.True(t, errors.Is(err, redis.Nil))
+ expectedErr := errors.New(("this is an expected error cause"))
+ err := NotFoundWithCause(nil)
+ assert.True(t, errors.Is(err, NotFound{cause: expectedErr}))
err2 := &NotFound{}
assert.True(t, errors.Is(err2, &NotFound{}))
diff --git a/store/interface.go b/lib/store/interface.go
similarity index 100%
rename from store/interface.go
rename to lib/store/interface.go
diff --git a/store/invalidate_options.go b/lib/store/invalidate_options.go
similarity index 100%
rename from store/invalidate_options.go
rename to lib/store/invalidate_options.go
diff --git a/store/invalidate_options_test.go b/lib/store/invalidate_options_test.go
similarity index 100%
rename from store/invalidate_options_test.go
rename to lib/store/invalidate_options_test.go
diff --git a/store/options.go b/lib/store/options.go
similarity index 100%
rename from store/options.go
rename to lib/store/options.go
diff --git a/store/options_test.go b/lib/store/options_test.go
similarity index 100%
rename from store/options_test.go
rename to lib/store/options_test.go
diff --git a/store/options_test_matchers.go b/lib/store/options_test_matchers.go
similarity index 100%
rename from store/options_test_matchers.go
rename to lib/store/options_test_matchers.go
diff --git a/test/mocks/store/store_interface.go b/lib/store/store_mock.go
similarity index 97%
rename from test/mocks/store/store_interface.go
rename to lib/store/store_mock.go
index ac8f580..bd8730e 100644
--- a/test/mocks/store/store_interface.go
+++ b/lib/store/store_mock.go
@@ -2,14 +2,13 @@
// Source: store/interface.go
// Package mocks is a generated GoMock package.
-package mocks
+package store
import (
context "context"
reflect "reflect"
time "time"
- store "github.com/eko/gocache/v3/store"
gomock "github.com/golang/mock/gomock"
)
@@ -110,7 +109,7 @@ func (mr *MockStoreInterfaceMockRecorder) GetWithTTL(ctx, key interface{}) *gomo
}
// Invalidate mocks base method.
-func (m *MockStoreInterface) Invalidate(ctx context.Context, options ...store.InvalidateOption) error {
+func (m *MockStoreInterface) Invalidate(ctx context.Context, options ...InvalidateOption) error {
m.ctrl.T.Helper()
varargs := []interface{}{ctx}
for _, a := range options {
@@ -129,7 +128,7 @@ func (mr *MockStoreInterfaceMockRecorder) Invalidate(ctx interface{}, options ..
}
// Set mocks base method.
-func (m *MockStoreInterface) Set(ctx context.Context, key, value any, options ...store.Option) error {
+func (m *MockStoreInterface) Set(ctx context.Context, key, value any, options ...Option) error {
m.ctrl.T.Helper()
varargs := []interface{}{ctx, key, value}
for _, a := range options {
diff --git a/store/bigcache.go b/store/bigcache/bigcache.go
similarity index 84%
rename from store/bigcache.go
rename to store/bigcache/bigcache.go
index 47484a6..c5579d9 100644
--- a/store/bigcache.go
+++ b/store/bigcache/bigcache.go
@@ -1,4 +1,4 @@
-package store
+package bigcache
import (
"context"
@@ -6,6 +6,8 @@ import (
"fmt"
"strings"
"time"
+
+ "github.com/eko/gocache/v4/lib/store"
)
// BigcacheClientInterface represents a allegro/bigcache client
@@ -26,14 +28,14 @@ const (
// BigcacheStore is a store for Bigcache
type BigcacheStore struct {
client BigcacheClientInterface
- options *Options
+ options *store.Options
}
// NewBigcache creates a new store to Bigcache instance(s)
-func NewBigcache(client BigcacheClientInterface, options ...Option) *BigcacheStore {
+func NewBigcache(client BigcacheClientInterface, options ...store.Option) *BigcacheStore {
return &BigcacheStore{
client: client,
- options: ApplyOptions(options...),
+ options: store.ApplyOptions(options...),
}
}
@@ -44,7 +46,7 @@ func (s *BigcacheStore) Get(_ context.Context, key any) (any, error) {
return nil, err
}
if item == nil {
- return nil, NotFoundWithCause(errors.New("unable to retrieve data from bigcache"))
+ return nil, store.NotFoundWithCause(errors.New("unable to retrieve data from bigcache"))
}
return item, err
@@ -57,8 +59,8 @@ func (s *BigcacheStore) GetWithTTL(ctx context.Context, key any) (any, time.Dura
}
// Set defines data in Bigcache for given key identifier
-func (s *BigcacheStore) Set(ctx context.Context, key any, value any, options ...Option) error {
- opts := ApplyOptionsWithDefault(s.options, options...)
+func (s *BigcacheStore) Set(ctx context.Context, key any, value any, options ...store.Option) error {
+ opts := store.ApplyOptionsWithDefault(s.options, options...)
var val []byte
switch v := value.(type) {
@@ -105,7 +107,7 @@ func (s *BigcacheStore) setTags(ctx context.Context, key any, tags []string) {
cacheKeys = append(cacheKeys, key.(string))
}
- s.Set(ctx, tagKey, []byte(strings.Join(cacheKeys, ",")), WithExpiration(720*time.Hour))
+ s.Set(ctx, tagKey, []byte(strings.Join(cacheKeys, ",")), store.WithExpiration(720*time.Hour))
}
}
@@ -115,8 +117,8 @@ func (s *BigcacheStore) Delete(_ context.Context, key any) error {
}
// Invalidate invalidates some cache data in Bigcache for given options
-func (s *BigcacheStore) Invalidate(ctx context.Context, options ...InvalidateOption) error {
- opts := ApplyInvalidateOptions(options...)
+func (s *BigcacheStore) Invalidate(ctx context.Context, options ...store.InvalidateOption) error {
+ opts := store.ApplyInvalidateOptions(options...)
if tags := opts.Tags; len(tags) > 0 {
for _, tag := range tags {
diff --git a/store/bigcache_bench_test.go b/store/bigcache/bigcache_bench_test.go
similarity index 86%
rename from store/bigcache_bench_test.go
rename to store/bigcache/bigcache_bench_test.go
index d7773e2..40270e8 100644
--- a/store/bigcache_bench_test.go
+++ b/store/bigcache/bigcache_bench_test.go
@@ -1,4 +1,4 @@
-package store
+package bigcache
import (
"context"
@@ -8,6 +8,7 @@ import (
"time"
"github.com/allegro/bigcache/v3"
+ lib_store "github.com/eko/gocache/v4/lib/store"
)
func BenchmarkBigcacheSet(b *testing.B) {
@@ -23,7 +24,7 @@ func BenchmarkBigcacheSet(b *testing.B) {
key := fmt.Sprintf("test-%d", n)
value := []byte(fmt.Sprintf("value-%d", n))
- store.Set(ctx, key, value, WithTags([]string{fmt.Sprintf("tag-%d", n)}))
+ store.Set(ctx, key, value, lib_store.WithTags([]string{fmt.Sprintf("tag-%d", n)}))
}
})
}
diff --git a/test/mocks/store/clients/bigcache_interface.go b/store/bigcache/bigcache_mock.go
similarity index 99%
rename from test/mocks/store/clients/bigcache_interface.go
rename to store/bigcache/bigcache_mock.go
index 14c7b6b..5767e2f 100644
--- a/test/mocks/store/clients/bigcache_interface.go
+++ b/store/bigcache/bigcache_mock.go
@@ -2,7 +2,7 @@
// Source: store/bigcache.go
// Package mocks is a generated GoMock package.
-package mocks +package bigcache import ( reflect "reflect" diff --git a/store/bigcache_test.go b/store/bigcache/bigcache_test.go similarity index 82% rename from store/bigcache_test.go rename to store/bigcache/bigcache_test.go index 75a05f7..3ccd9c1 100644 --- a/store/bigcache_test.go +++ b/store/bigcache/bigcache_test.go @@ -1,4 +1,4 @@ -package store +package bigcache import ( "context" @@ -6,7 +6,7 @@ import ( "testing" "time" - mocksStore "github.com/eko/gocache/v3/test/mocks/store/clients" + lib_store "github.com/eko/gocache/v4/lib/store" "github.com/golang/mock/gomock" "github.com/stretchr/testify/assert" ) @@ -15,7 +15,7 @@ func TestNewBigcache(t *testing.T) { // Given ctrl := gomock.NewController(t) - client := mocksStore.NewMockBigcacheClientInterface(ctrl) + client := NewMockBigcacheClientInterface(ctrl) // When store := NewBigcache(client) @@ -23,7 +23,7 @@ func TestNewBigcache(t *testing.T) { // Then assert.IsType(t, new(BigcacheStore), store) assert.Equal(t, client, store.client) - assert.Equal(t, new(Options), store.options) + assert.Equal(t, new(lib_store.Options), store.options) } func TestBigcacheGet(t *testing.T) { @@ -35,7 +35,7 @@ func TestBigcacheGet(t *testing.T) { cacheKey := "my-key" cacheValue := []byte("my-cache-value") - client := mocksStore.NewMockBigcacheClientInterface(ctrl) + client := NewMockBigcacheClientInterface(ctrl) client.EXPECT().Get(cacheKey).Return(cacheValue, nil) store := NewBigcache(client) @@ -58,7 +58,7 @@ func TestBigcacheGetWhenError(t *testing.T) { expectedErr := errors.New("an unexpected error occurred") - client := mocksStore.NewMockBigcacheClientInterface(ctrl) + client := NewMockBigcacheClientInterface(ctrl) client.EXPECT().Get(cacheKey).Return(nil, expectedErr) store := NewBigcache(client) @@ -80,7 +80,7 @@ func TestBigcacheGetWithTTL(t *testing.T) { cacheKey := "my-key" cacheValue := []byte("my-cache-value") - client := mocksStore.NewMockBigcacheClientInterface(ctrl) + client := NewMockBigcacheClientInterface(ctrl) client.EXPECT().Get(cacheKey).Return(cacheValue, nil) store := NewBigcache(client) @@ -104,7 +104,7 @@ func TestBigcacheGetWithTTLWhenError(t *testing.T) { expectedErr := errors.New("an unexpected error occurred") - client := mocksStore.NewMockBigcacheClientInterface(ctrl) + client := NewMockBigcacheClientInterface(ctrl) client.EXPECT().Get(cacheKey).Return(nil, expectedErr) store := NewBigcache(client) @@ -127,7 +127,7 @@ func TestBigcacheSet(t *testing.T) { cacheKey := "my-key" cacheValue := []byte("my-cache-value") - client := mocksStore.NewMockBigcacheClientInterface(ctrl) + client := NewMockBigcacheClientInterface(ctrl) client.EXPECT().Set(cacheKey, cacheValue).Return(nil) store := NewBigcache(client) @@ -150,7 +150,7 @@ func TestBigcacheSetString(t *testing.T) { // The value is string when failback from Redis cacheValue := "my-cache-value" - client := mocksStore.NewMockBigcacheClientInterface(ctrl) + client := NewMockBigcacheClientInterface(ctrl) client.EXPECT().Set(cacheKey, []byte(cacheValue)).Return(nil) store := NewBigcache(client) @@ -173,7 +173,7 @@ func TestBigcacheSetWhenError(t *testing.T) { expectedErr := errors.New("an unexpected error occurred") - client := mocksStore.NewMockBigcacheClientInterface(ctrl) + client := NewMockBigcacheClientInterface(ctrl) client.EXPECT().Set(cacheKey, cacheValue).Return(expectedErr) store := NewBigcache(client) @@ -194,7 +194,7 @@ func TestBigcacheSetWithTags(t *testing.T) { cacheKey := "my-key" cacheValue := []byte("my-cache-value") - client := 
mocksStore.NewMockBigcacheClientInterface(ctrl) + client := NewMockBigcacheClientInterface(ctrl) client.EXPECT().Set(cacheKey, cacheValue).Return(nil) client.EXPECT().Get("gocache_tag_tag1").Return(nil, nil) client.EXPECT().Set("gocache_tag_tag1", []byte("my-key")).Return(nil) @@ -202,7 +202,7 @@ func TestBigcacheSetWithTags(t *testing.T) { store := NewBigcache(client) // When - err := store.Set(ctx, cacheKey, cacheValue, WithTags([]string{"tag1"})) + err := store.Set(ctx, cacheKey, cacheValue, lib_store.WithTags([]string{"tag1"})) // Then assert.Nil(t, err) @@ -217,7 +217,7 @@ func TestBigcacheSetWithTagsWhenAlreadyInserted(t *testing.T) { cacheKey := "my-key" cacheValue := []byte("my-cache-value") - client := mocksStore.NewMockBigcacheClientInterface(ctrl) + client := NewMockBigcacheClientInterface(ctrl) client.EXPECT().Set(cacheKey, cacheValue).Return(nil) client.EXPECT().Get("gocache_tag_tag1").Return([]byte("my-key,a-second-key"), nil) client.EXPECT().Set("gocache_tag_tag1", []byte("my-key,a-second-key")).Return(nil) @@ -225,7 +225,7 @@ func TestBigcacheSetWithTagsWhenAlreadyInserted(t *testing.T) { store := NewBigcache(client) // When - err := store.Set(ctx, cacheKey, cacheValue, WithTags([]string{"tag1"})) + err := store.Set(ctx, cacheKey, cacheValue, lib_store.WithTags([]string{"tag1"})) // Then assert.Nil(t, err) @@ -239,7 +239,7 @@ func TestBigcacheDelete(t *testing.T) { cacheKey := "my-key" - client := mocksStore.NewMockBigcacheClientInterface(ctrl) + client := NewMockBigcacheClientInterface(ctrl) client.EXPECT().Delete(cacheKey).Return(nil) store := NewBigcache(client) @@ -261,7 +261,7 @@ func TestBigcacheDeleteWhenError(t *testing.T) { cacheKey := "my-key" - client := mocksStore.NewMockBigcacheClientInterface(ctrl) + client := NewMockBigcacheClientInterface(ctrl) client.EXPECT().Delete(cacheKey).Return(expectedErr) store := NewBigcache(client) @@ -281,7 +281,7 @@ func TestBigcacheInvalidate(t *testing.T) { cacheKeys := []byte("a23fdf987h2svc23,jHG2372x38hf74") - client := mocksStore.NewMockBigcacheClientInterface(ctrl) + client := NewMockBigcacheClientInterface(ctrl) client.EXPECT().Get("gocache_tag_tag1").Return(cacheKeys, nil) client.EXPECT().Delete("a23fdf987h2svc23").Return(nil) client.EXPECT().Delete("jHG2372x38hf74").Return(nil) @@ -289,7 +289,7 @@ func TestBigcacheInvalidate(t *testing.T) { store := NewBigcache(client) // When - err := store.Invalidate(ctx, WithInvalidateTags([]string{"tag1"})) + err := store.Invalidate(ctx, lib_store.WithInvalidateTags([]string{"tag1"})) // Then assert.Nil(t, err) @@ -303,7 +303,7 @@ func TestBigcacheInvalidateWhenError(t *testing.T) { cacheKeys := []byte("a23fdf987h2svc23,jHG2372x38hf74") - client := mocksStore.NewMockBigcacheClientInterface(ctrl) + client := NewMockBigcacheClientInterface(ctrl) client.EXPECT().Get("gocache_tag_tag1").Return(cacheKeys, nil) client.EXPECT().Delete("a23fdf987h2svc23").Return(errors.New("unexpected error")) client.EXPECT().Delete("jHG2372x38hf74").Return(nil) @@ -311,7 +311,7 @@ func TestBigcacheInvalidateWhenError(t *testing.T) { store := NewBigcache(client) // When - err := store.Invalidate(ctx, WithInvalidateTags([]string{"tag1"})) + err := store.Invalidate(ctx, lib_store.WithInvalidateTags([]string{"tag1"})) // Then assert.Nil(t, err) @@ -323,7 +323,7 @@ func TestBigcacheClear(t *testing.T) { ctx := context.Background() - client := mocksStore.NewMockBigcacheClientInterface(ctrl) + client := NewMockBigcacheClientInterface(ctrl) client.EXPECT().Reset().Return(nil) store := NewBigcache(client) @@ -343,7 
+343,7 @@ func TestBigcacheClearWhenError(t *testing.T) { expectedErr := errors.New("an unexpected error occurred") - client := mocksStore.NewMockBigcacheClientInterface(ctrl) + client := NewMockBigcacheClientInterface(ctrl) client.EXPECT().Reset().Return(expectedErr) store := NewBigcache(client) @@ -359,7 +359,7 @@ func TestBigcacheGetType(t *testing.T) { // Given ctrl := gomock.NewController(t) - client := mocksStore.NewMockBigcacheClientInterface(ctrl) + client := NewMockBigcacheClientInterface(ctrl) store := NewBigcache(client) diff --git a/store/bigcache/go.mod b/store/bigcache/go.mod new file mode 100644 index 0000000..5983291 --- /dev/null +++ b/store/bigcache/go.mod @@ -0,0 +1,19 @@ +module github.com/eko/gocache/v4/store/bigcache + +go 1.19 + +require ( + github.com/allegro/bigcache/v3 v3.1.0 + github.com/eko/gocache/v4/lib v0.0.0 + github.com/golang/mock v1.6.0 + github.com/stretchr/testify v1.8.1 +) + +replace github.com/eko/gocache/v4/lib => ../../lib/ + +require ( + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + golang.org/x/exp v0.0.0-20221126150942-6ab00d035af9 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect +) diff --git a/store/bigcache/go.sum b/store/bigcache/go.sum new file mode 100644 index 0000000..7c3fb1c --- /dev/null +++ b/store/bigcache/go.sum @@ -0,0 +1,49 @@ +github.com/allegro/bigcache/v3 v3.1.0 h1:H2Vp8VOvxcrB91o86fUSVJFqeuz8kpyyB02eH3bSzwk= +github.com/allegro/bigcache/v3 v3.1.0/go.mod h1:aPyh7jEvrog9zAwx5N7+JUQX5dZTSGpxF1LAR4dr35I= +github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= +github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI= +github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/exp v0.0.0-20221126150942-6ab00d035af9 h1:yZNXmy+j/JpX19vZkVktWqAo7Gny4PBWYYK3zskGpx4= +golang.org/x/exp v0.0.0-20221126150942-6ab00d035af9/go.mod 
h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/store/freecache.go b/store/freecache/freecache.go similarity index 84% rename from store/freecache.go rename to store/freecache/freecache.go index 4086c86..1445bae 100644 --- a/store/freecache.go +++ b/store/freecache/freecache.go @@ -1,4 +1,4 @@ -package store +package freecache import ( "context" @@ -6,6 +6,8 @@ import ( "fmt" "strings" "time" + + lib_store "github.com/eko/gocache/v4/lib/store" ) const ( @@ -30,14 +32,14 @@ type FreecacheClientInterface interface { // FreecacheStore is a store for freecache type FreecacheStore struct { client FreecacheClientInterface - options *Options + options *lib_store.Options } // NewFreecache creates a new store to freecache instance(s) -func NewFreecache(client FreecacheClientInterface, options ...Option) *FreecacheStore { +func NewFreecache(client FreecacheClientInterface, options ...lib_store.Option) *FreecacheStore { return &FreecacheStore{ client: client, - options: 
ApplyOptions(options...),
+		options: lib_store.ApplyOptions(options...),
 	}
 }
 
@@ -48,7 +50,7 @@ func (f *FreecacheStore) Get(_ context.Context, key any) (any, error) {
 	if k, ok := key.(string); ok {
 		result, err = f.client.Get([]byte(k))
 		if err != nil {
-			return nil, NotFoundWithCause(errors.New("value not found in Freecache store"))
+			return nil, lib_store.NotFoundWithCause(errors.New("value not found in Freecache store"))
 		}
 		return result, err
 	}
@@ -61,12 +63,12 @@ func (f *FreecacheStore) GetWithTTL(_ context.Context, key any) (any, time.Durat
 	if k, ok := key.(string); ok {
 		result, err := f.client.Get([]byte(k))
 		if err != nil {
-			return nil, 0, NotFoundWithCause(errors.New("value not found in Freecache store"))
+			return nil, 0, lib_store.NotFoundWithCause(errors.New("value not found in Freecache store"))
 		}
 
 		ttl, err := f.client.TTL([]byte(k))
 		if err != nil {
-			return nil, 0, NotFoundWithCause(errors.New("value not found in Freecache store"))
+			return nil, 0, lib_store.NotFoundWithCause(errors.New("value not found in Freecache store"))
 		}
 
 		return result, time.Duration(ttl) * time.Second, err
@@ -79,12 +81,12 @@ func (f *FreecacheStore) GetWithTTL(_ context.Context, key any) (any, time.Durat
 // If the key is larger than 65535 or value is larger than 1/1024 of the cache size,
 // the entry will not be written to the cache. expireSeconds <= 0 means no expire,
 // but it can be evicted when cache is full.
-func (f *FreecacheStore) Set(ctx context.Context, key any, value any, options ...Option) error {
+func (f *FreecacheStore) Set(ctx context.Context, key any, value any, options ...lib_store.Option) error {
 	var err error
 	var val []byte
 
 	// Using default options set during cache initialization
-	opts := ApplyOptionsWithDefault(f.options, options...)
+	opts := lib_store.ApplyOptionsWithDefault(f.options, options...)
 
 	// type check for value, as freecache only supports value of type []byte
 	switch v := value.(type) {
@@ -124,7 +126,7 @@ func (f *FreecacheStore) setTags(ctx context.Context, key any, tags []string) {
 			cacheKeys = append(cacheKeys, key.(string))
 		}
 
-		f.Set(ctx, tagKey, []byte(strings.Join(cacheKeys, ",")), WithExpiration(720*time.Hour))
+		f.Set(ctx, tagKey, []byte(strings.Join(cacheKeys, ",")), lib_store.WithExpiration(720*time.Hour))
 	}
 }
 
@@ -150,8 +152,8 @@ func (f *FreecacheStore) Delete(_ context.Context, key any) error {
 }
 
 // Invalidate invalidates some cache data in freecache for given options
-func (f *FreecacheStore) Invalidate(ctx context.Context, options ...InvalidateOption) error {
-	opts := ApplyInvalidateOptions(options...)
+func (f *FreecacheStore) Invalidate(ctx context.Context, options ...lib_store.InvalidateOption) error {
+	opts := lib_store.ApplyInvalidateOptions(options...)
if tags := opts.Tags; len(tags) > 0 { for _, tag := range tags { diff --git a/store/freecache_bench_test.go b/store/freecache/freecache_bench_test.go similarity index 80% rename from store/freecache_bench_test.go rename to store/freecache/freecache_bench_test.go index b9b64ff..4cd47d6 100644 --- a/store/freecache_bench_test.go +++ b/store/freecache/freecache_bench_test.go @@ -1,4 +1,4 @@ -package store +package freecache import ( "context" @@ -8,13 +8,14 @@ import ( "time" "github.com/coocood/freecache" + lib_store "github.com/eko/gocache/v4/lib/store" ) func BenchmarkFreecacheSet(b *testing.B) { ctx := context.Background() c := freecache.NewCache(1000) - freecacheStore := NewFreecache(c, WithExpiration(10*time.Second)) + freecacheStore := NewFreecache(c, lib_store.WithExpiration(10*time.Second)) for k := 0.; k <= 10; k++ { n := int(math.Pow(2, k)) @@ -33,7 +34,7 @@ func BenchmarkFreecacheGet(b *testing.B) { ctx := context.Background() c := freecache.NewCache(1000) - freecacheStore := NewFreecache(c, WithExpiration(10*time.Second)) + freecacheStore := NewFreecache(c, lib_store.WithExpiration(10*time.Second)) key := "test" value := []byte("value") diff --git a/test/mocks/store/clients/freecache_interface.go b/store/freecache/freecache_mock.go similarity index 99% rename from test/mocks/store/clients/freecache_interface.go rename to store/freecache/freecache_mock.go index 8702f86..9bc20ec 100644 --- a/test/mocks/store/clients/freecache_interface.go +++ b/store/freecache/freecache_mock.go @@ -2,7 +2,7 @@ // Source: store/freecache.go // Package mocks is a generated GoMock package. -package mocks +package freecache import ( reflect "reflect" diff --git a/store/freecache_test.go b/store/freecache/freecache_test.go similarity index 72% rename from store/freecache_test.go rename to store/freecache/freecache_test.go index 67f9ddc..bb982c1 100644 --- a/store/freecache_test.go +++ b/store/freecache/freecache_test.go @@ -1,4 +1,4 @@ -package store +package freecache import ( "context" @@ -7,7 +7,7 @@ import ( "testing" "time" - mocksStore "github.com/eko/gocache/v3/test/mocks/store/clients" + lib_store "github.com/eko/gocache/v4/lib/store" "github.com/golang/mock/gomock" "github.com/stretchr/testify/assert" ) @@ -16,15 +16,15 @@ func TestNewFreecache(t *testing.T) { // Given ctrl := gomock.NewController(t) - client := mocksStore.NewMockFreecacheClientInterface(ctrl) + client := NewMockFreecacheClientInterface(ctrl) // When - store := NewFreecache(client, WithExpiration(6*time.Second)) + store := NewFreecache(client, lib_store.WithExpiration(6*time.Second)) // Then assert.IsType(t, new(FreecacheStore), store) assert.Equal(t, client, store.client) - assert.Equal(t, &Options{ + assert.Equal(t, &lib_store.Options{ Expiration: 6 * time.Second, }, store.options) } @@ -33,7 +33,7 @@ func TestNewFreecacheDefaultOptions(t *testing.T) { // Given ctrl := gomock.NewController(t) - client := mocksStore.NewMockFreecacheClientInterface(ctrl) + client := NewMockFreecacheClientInterface(ctrl) // When store := NewFreecache(client) @@ -41,7 +41,7 @@ func TestNewFreecacheDefaultOptions(t *testing.T) { // Then assert.IsType(t, new(FreecacheStore), store) assert.Equal(t, client, store.client) - assert.Equal(t, new(Options), store.options) + assert.Equal(t, new(lib_store.Options), store.options) } func TestFreecacheGet(t *testing.T) { @@ -50,7 +50,7 @@ func TestFreecacheGet(t *testing.T) { ctx := context.Background() - client := mocksStore.NewMockFreecacheClientInterface(ctrl) + client := 
NewMockFreecacheClientInterface(ctrl) client.EXPECT().Get([]byte("key1")).Return([]byte("val1"), nil) client.EXPECT().Get([]byte("key2")).Return([]byte("val2"), nil) @@ -71,7 +71,7 @@ func TestFreecacheGetNotFound(t *testing.T) { ctx := context.Background() - client := mocksStore.NewMockFreecacheClientInterface(ctrl) + client := NewMockFreecacheClientInterface(ctrl) client.EXPECT().Get([]byte("key1")).Return(nil, errors.New("value not found in store")) s := NewFreecache(client) @@ -87,7 +87,7 @@ func TestFreecacheGetWithInvalidKey(t *testing.T) { ctx := context.Background() - client := mocksStore.NewMockFreecacheClientInterface(ctrl) + client := NewMockFreecacheClientInterface(ctrl) s := NewFreecache(client) @@ -105,11 +105,11 @@ func TestFreecacheGetWithTTL(t *testing.T) { cacheKey := "my-key" cacheValue := []byte("my-cache-value") - client := mocksStore.NewMockFreecacheClientInterface(ctrl) + client := NewMockFreecacheClientInterface(ctrl) client.EXPECT().Get([]byte(cacheKey)).Return(cacheValue, nil) client.EXPECT().TTL([]byte(cacheKey)).Return(uint32(5), nil) - store := NewFreecache(client, WithExpiration(3*time.Second)) + store := NewFreecache(client, lib_store.WithExpiration(3*time.Second)) // When value, ttl, err := store.GetWithTTL(ctx, cacheKey) @@ -128,16 +128,16 @@ func TestFreecacheGetWithTTLWhenMissingItem(t *testing.T) { cacheKey := "my-key" - client := mocksStore.NewMockFreecacheClientInterface(ctrl) - client.EXPECT().Get([]byte(cacheKey)).Return(nil, NotFound{}) + client := NewMockFreecacheClientInterface(ctrl) + client.EXPECT().Get([]byte(cacheKey)).Return(nil, lib_store.NotFound{}) - store := NewFreecache(client, WithExpiration(3*time.Second)) + store := NewFreecache(client, lib_store.WithExpiration(3*time.Second)) // When value, ttl, err := store.GetWithTTL(ctx, cacheKey) // Then - assert.ErrorIs(t, err, NotFound{}) + assert.ErrorIs(t, err, lib_store.NotFound{}) assert.Nil(t, value) assert.Equal(t, 0*time.Second, ttl) } @@ -151,17 +151,17 @@ func TestFreecacheGetWithTTLWhenErrorAtTTL(t *testing.T) { cacheKey := "my-key" cacheValue := []byte("my-cache-value") - client := mocksStore.NewMockFreecacheClientInterface(ctrl) + client := NewMockFreecacheClientInterface(ctrl) client.EXPECT().Get([]byte(cacheKey)).Return(cacheValue, nil) - client.EXPECT().TTL([]byte(cacheKey)).Return(uint32(0), NotFound{}) + client.EXPECT().TTL([]byte(cacheKey)).Return(uint32(0), lib_store.NotFound{}) - store := NewFreecache(client, WithExpiration(3*time.Second)) + store := NewFreecache(client, lib_store.WithExpiration(3*time.Second)) // When value, ttl, err := store.GetWithTTL(ctx, cacheKey) // Then - assert.ErrorIs(t, err, NotFound{}) + assert.ErrorIs(t, err, lib_store.NotFound{}) assert.Nil(t, value) assert.Equal(t, 0*time.Second, ttl) } @@ -172,7 +172,7 @@ func TestFreecacheGetWithTTLWhenInvalidKey(t *testing.T) { ctx := context.Background() - client := mocksStore.NewMockFreecacheClientInterface(ctrl) + client := NewMockFreecacheClientInterface(ctrl) s := NewFreecache(client) @@ -191,11 +191,11 @@ func TestFreecacheSet(t *testing.T) { cacheKey := "my-key" cacheValue := []byte("my-cache-value") - client := mocksStore.NewMockFreecacheClientInterface(ctrl) + client := NewMockFreecacheClientInterface(ctrl) client.EXPECT().Set([]byte(cacheKey), cacheValue, 6).Return(nil) - s := NewFreecache(client, WithExpiration(6*time.Second)) - err := s.Set(ctx, cacheKey, cacheValue, WithExpiration(6*time.Second)) + s := NewFreecache(client, lib_store.WithExpiration(6*time.Second)) + err := s.Set(ctx, cacheKey, 
cacheValue, lib_store.WithExpiration(6*time.Second)) assert.Nil(t, err) } @@ -208,7 +208,7 @@ func TestFreecacheSetWithDefaultOptions(t *testing.T) { cacheKey := "my-key" cacheValue := []byte("my-cache-value") - client := mocksStore.NewMockFreecacheClientInterface(ctrl) + client := NewMockFreecacheClientInterface(ctrl) client.EXPECT().Set([]byte(cacheKey), cacheValue, 0).Return(nil) s := NewFreecache(client) @@ -226,10 +226,10 @@ func TestFreecacheSetInvalidValue(t *testing.T) { cacheValue := "my-cache-value" expectedErr := errors.New("value type not supported by Freecache store") - client := mocksStore.NewMockFreecacheClientInterface(ctrl) + client := NewMockFreecacheClientInterface(ctrl) - s := NewFreecache(client, WithExpiration(6*time.Second)) - err := s.Set(ctx, cacheKey, cacheValue, WithExpiration(6*time.Second)) + s := NewFreecache(client, lib_store.WithExpiration(6*time.Second)) + err := s.Set(ctx, cacheKey, cacheValue, lib_store.WithExpiration(6*time.Second)) assert.Equal(t, expectedErr, err) } @@ -242,11 +242,11 @@ func TestFreecacheSetInvalidSize(t *testing.T) { cacheKey := "my-key" cacheValue := []byte("my-cache-value") expectedErr := fmt.Errorf("size of key: %v, value: %v, err: %v", cacheKey, cacheValue, errors.New("")) - client := mocksStore.NewMockFreecacheClientInterface(ctrl) + client := NewMockFreecacheClientInterface(ctrl) client.EXPECT().Set([]byte(cacheKey), cacheValue, 6).Return(expectedErr) - s := NewFreecache(client, WithExpiration(6*time.Second)) - err := s.Set(ctx, cacheKey, cacheValue, WithExpiration(6*time.Second)) + s := NewFreecache(client, lib_store.WithExpiration(6*time.Second)) + err := s.Set(ctx, cacheKey, cacheValue, lib_store.WithExpiration(6*time.Second)) assert.NotNil(t, err) } @@ -261,10 +261,10 @@ func TestFreecacheSetInvalidKey(t *testing.T) { expectedErr := errors.New("key type not supported by Freecache store") - client := mocksStore.NewMockFreecacheClientInterface(ctrl) + client := NewMockFreecacheClientInterface(ctrl) - s := NewFreecache(client, WithExpiration(6*time.Second)) - err := s.Set(ctx, cacheKey, cacheValue, WithExpiration(6*time.Second)) + s := NewFreecache(client, lib_store.WithExpiration(6*time.Second)) + err := s.Set(ctx, cacheKey, cacheValue, lib_store.WithExpiration(6*time.Second)) assert.Equal(t, expectedErr, err) } @@ -276,7 +276,7 @@ func TestFreecacheDelete(t *testing.T) { cacheKey := "key" - client := mocksStore.NewMockFreecacheClientInterface(ctrl) + client := NewMockFreecacheClientInterface(ctrl) client.EXPECT().Del(gomock.Any()).Return(true) s := NewFreecache(client) @@ -292,7 +292,7 @@ func TestFreecacheDeleteFailed(t *testing.T) { cacheKey := "key" expectedErr := fmt.Errorf("failed to delete key %v", cacheKey) - client := mocksStore.NewMockFreecacheClientInterface(ctrl) + client := NewMockFreecacheClientInterface(ctrl) client.EXPECT().Del(gomock.Any()).Return(false) s := NewFreecache(client) @@ -308,7 +308,7 @@ func TestFreecacheDeleteInvalidKey(t *testing.T) { cacheKey := 1 expectedErr := errors.New("key type not supported by Freecache store") - client := mocksStore.NewMockFreecacheClientInterface(ctrl) + client := NewMockFreecacheClientInterface(ctrl) s := NewFreecache(client) err := s.Delete(ctx, cacheKey) @@ -324,13 +324,13 @@ func TestFreecacheSetWithTags(t *testing.T) { cacheKey := "my-key" cacheValue := []byte("my-cache-value") - client := mocksStore.NewMockFreecacheClientInterface(ctrl) + client := NewMockFreecacheClientInterface(ctrl) client.EXPECT().Set([]byte(cacheKey), cacheValue, 6).Return(nil) 
client.EXPECT().Get([]byte("freecache_tag_tag1")).MaxTimes(1).Return(nil, errors.New("value not found in store")) client.EXPECT().Set([]byte("freecache_tag_tag1"), []byte("my-key"), 2592000).Return(nil) - s := NewFreecache(client, WithExpiration(6*time.Second)) - err := s.Set(ctx, cacheKey, cacheValue, WithExpiration(6*time.Second), WithTags([]string{"tag1"})) + s := NewFreecache(client, lib_store.WithExpiration(6*time.Second)) + err := s.Set(ctx, cacheKey, cacheValue, lib_store.WithExpiration(6*time.Second), lib_store.WithTags([]string{"tag1"})) assert.Nil(t, err) } @@ -342,15 +342,15 @@ func TestFreecacheInvalidate(t *testing.T) { cacheKeys := []byte("my-key") - client := mocksStore.NewMockFreecacheClientInterface(ctrl) + client := NewMockFreecacheClientInterface(ctrl) client.EXPECT().Get([]byte("freecache_tag_tag1")).Return(cacheKeys, nil) client.EXPECT().Del([]byte("my-key")).Return(true) client.EXPECT().Del([]byte("freecache_tag_tag1")).Return(true) - s := NewFreecache(client, WithExpiration(6*time.Second)) + s := NewFreecache(client, lib_store.WithExpiration(6*time.Second)) // When - err := s.Invalidate(ctx, WithInvalidateTags([]string{"tag1"})) + err := s.Invalidate(ctx, lib_store.WithInvalidateTags([]string{"tag1"})) // Then assert.Nil(t, err) @@ -367,13 +367,13 @@ func TestFreecacheTagsAlreadyPresent(t *testing.T) { oldCacheKeys := []byte("key1,key2") - client := mocksStore.NewMockFreecacheClientInterface(ctrl) + client := NewMockFreecacheClientInterface(ctrl) client.EXPECT().Set([]byte(cacheKey), cacheValue, 6).Return(nil) client.EXPECT().Get([]byte("freecache_tag_tag1")).MaxTimes(1).Return(oldCacheKeys, nil) client.EXPECT().Set([]byte("freecache_tag_tag1"), []byte("key1,key2,my-key"), 2592000).Return(nil) - s := NewFreecache(client, WithExpiration(6*time.Second)) - err := s.Set(ctx, cacheKey, cacheValue, WithExpiration(6*time.Second), WithTags([]string{"tag1"})) + s := NewFreecache(client, lib_store.WithExpiration(6*time.Second)) + err := s.Set(ctx, cacheKey, cacheValue, lib_store.WithExpiration(6*time.Second), lib_store.WithTags([]string{"tag1"})) assert.Nil(t, err) } @@ -388,13 +388,13 @@ func TestFreecacheTagsRefreshTime(t *testing.T) { oldCacheKeys := []byte("my-key") - client := mocksStore.NewMockFreecacheClientInterface(ctrl) + client := NewMockFreecacheClientInterface(ctrl) client.EXPECT().Set([]byte(cacheKey), cacheValue, 6).Return(nil) client.EXPECT().Get([]byte("freecache_tag_tag1")).MaxTimes(1).Return(oldCacheKeys, nil) client.EXPECT().Set([]byte("freecache_tag_tag1"), []byte("my-key"), 2592000).Return(nil) - s := NewFreecache(client, WithExpiration(6*time.Second)) - err := s.Set(ctx, cacheKey, cacheValue, WithExpiration(6*time.Second), WithTags([]string{"tag1"})) + s := NewFreecache(client, lib_store.WithExpiration(6*time.Second)) + err := s.Set(ctx, cacheKey, cacheValue, lib_store.WithExpiration(6*time.Second), lib_store.WithTags([]string{"tag1"})) assert.Nil(t, err) } @@ -406,17 +406,17 @@ func TestFreecacheInvalidateMultipleKeys(t *testing.T) { cacheKeys := []byte("my-key,key1,key2") - client := mocksStore.NewMockFreecacheClientInterface(ctrl) + client := NewMockFreecacheClientInterface(ctrl) client.EXPECT().Get([]byte("freecache_tag_tag1")).Return(cacheKeys, nil) client.EXPECT().Del([]byte("my-key")).Return(true) client.EXPECT().Del([]byte("key1")).Return(true) client.EXPECT().Del([]byte("key2")).Return(true) client.EXPECT().Del([]byte("freecache_tag_tag1")).Return(true) - s := NewFreecache(client, WithExpiration(6*time.Second)) + s := NewFreecache(client, 
lib_store.WithExpiration(6*time.Second)) // When - err := s.Invalidate(ctx, WithInvalidateTags([]string{"tag1"})) + err := s.Invalidate(ctx, lib_store.WithInvalidateTags([]string{"tag1"})) // Then assert.Nil(t, err) @@ -430,14 +430,14 @@ func TestFreecacheFailedInvalidateMultipleKeys(t *testing.T) { cacheKeys := []byte("my-key,key1,key2") - client := mocksStore.NewMockFreecacheClientInterface(ctrl) + client := NewMockFreecacheClientInterface(ctrl) client.EXPECT().Get([]byte("freecache_tag_tag1")).Return(cacheKeys, nil) client.EXPECT().Del([]byte("my-key")).Return(false) - s := NewFreecache(client, WithExpiration(6*time.Second)) + s := NewFreecache(client, lib_store.WithExpiration(6*time.Second)) // When - err := s.Invalidate(ctx, WithInvalidateTags([]string{"tag1"})) + err := s.Invalidate(ctx, lib_store.WithInvalidateTags([]string{"tag1"})) // Then assert.EqualError(t, err, "failed to delete key my-key") @@ -451,17 +451,17 @@ func TestFreecacheFailedInvalidatePattern(t *testing.T) { cacheKeys := []byte("my-key,key1,key2") - client := mocksStore.NewMockFreecacheClientInterface(ctrl) + client := NewMockFreecacheClientInterface(ctrl) client.EXPECT().Get([]byte("freecache_tag_tag1")).Return(cacheKeys, nil) client.EXPECT().Del([]byte("my-key")).Return(true) client.EXPECT().Del([]byte("key1")).Return(true) client.EXPECT().Del([]byte("key2")).Return(true) client.EXPECT().Del([]byte("freecache_tag_tag1")).Return(false) - s := NewFreecache(client, WithExpiration(6*time.Second)) + s := NewFreecache(client, lib_store.WithExpiration(6*time.Second)) // When - err := s.Invalidate(ctx, WithInvalidateTags([]string{"tag1"})) + err := s.Invalidate(ctx, lib_store.WithInvalidateTags([]string{"tag1"})) // Then assert.EqualError(t, err, "failed to delete key freecache_tag_tag1") @@ -473,7 +473,7 @@ func TestFreecacheClearAll(t *testing.T) { ctx := context.Background() - client := mocksStore.NewMockFreecacheClientInterface(ctrl) + client := NewMockFreecacheClientInterface(ctrl) client.EXPECT().Clear() s := NewFreecache(client) @@ -489,7 +489,7 @@ func TestFreecacheGetType(t *testing.T) { // Given ctrl := gomock.NewController(t) - client := mocksStore.NewMockFreecacheClientInterface(ctrl) + client := NewMockFreecacheClientInterface(ctrl) s := NewFreecache(client) diff --git a/store/freecache/go.mod b/store/freecache/go.mod new file mode 100644 index 0000000..f81fb4f --- /dev/null +++ b/store/freecache/go.mod @@ -0,0 +1,20 @@ +module github.com/eko/gocache/v4/store/freecache + +go 1.19 + +require ( + github.com/coocood/freecache v1.2.3 + github.com/eko/gocache/v4/lib v0.0.0 + github.com/golang/mock v1.6.0 + github.com/stretchr/testify v1.8.1 +) + +require ( + github.com/cespare/xxhash/v2 v2.1.2 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + golang.org/x/exp v0.0.0-20221126150942-6ab00d035af9 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect +) + +replace github.com/eko/gocache/v4/lib => ../../lib/ diff --git a/store/freecache/go.sum b/store/freecache/go.sum new file mode 100644 index 0000000..b6959fd --- /dev/null +++ b/store/freecache/go.sum @@ -0,0 +1,50 @@ +github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= +github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/coocood/freecache v1.2.3 h1:lcBwpZrwBZRZyLk/8EMyQVXRiFl663cCuMOrjCALeto= +github.com/coocood/freecache v1.2.3/go.mod h1:RBUWa/Cy+OHdfTGFEhEuE1pMCMX51Ncizj7rthiQ3vk= +github.com/davecgh/go-spew v1.1.0/go.mod 
h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= +github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI= +github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/exp v0.0.0-20221126150942-6ab00d035af9 h1:yZNXmy+j/JpX19vZkVktWqAo7Gny4PBWYYK3zskGpx4= +golang.org/x/exp v0.0.0-20221126150942-6ab00d035af9/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/store/go_cache/go.mod b/store/go_cache/go.mod new file mode 100644 index 0000000..179888b --- /dev/null +++ b/store/go_cache/go.mod @@ -0,0 +1,21 @@ +module github.com/eko/gocache/v4/store/go_cache + +go 1.19 + +require ( + github.com/eko/gocache/v4/lib v0.0.0 + github.com/golang/mock v1.6.0 + github.com/patrickmn/go-cache v2.1.0+incompatible + github.com/stretchr/testify v1.8.1 +) + +require ( + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/kr/pretty v0.1.0 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + golang.org/x/exp v0.0.0-20221126150942-6ab00d035af9 // indirect + gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect +) + +replace github.com/eko/gocache/v4/lib => ../../lib/ diff --git a/store/go_cache/go.sum b/store/go_cache/go.sum new file mode 100644 index 0000000..b5f5200 --- /dev/null +++ b/store/go_cache/go.sum @@ -0,0 +1,55 @@ +github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= +github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI= +github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc= +github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod 
h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/exp v0.0.0-20221126150942-6ab00d035af9 h1:yZNXmy+j/JpX19vZkVktWqAo7Gny4PBWYYK3zskGpx4= +golang.org/x/exp v0.0.0-20221126150942-6ab00d035af9/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 
h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git a/store/go_cache.go b/store/go_cache/go_cache.go
similarity index 85%
rename from store/go_cache.go
rename to store/go_cache/go_cache.go
index 790cf31..93c4789 100644
--- a/store/go_cache.go
+++ b/store/go_cache/go_cache.go
@@ -1,4 +1,4 @@
-package store
+package go_cache
 
 import (
 	"context"
@@ -6,6 +6,8 @@ import (
 	"fmt"
 	"sync"
 	"time"
+
+	lib_store "github.com/eko/gocache/v4/lib/store"
 )
 
 const (
@@ -28,14 +30,14 @@ type GoCacheClientInterface interface {
 type GoCacheStore struct {
 	mu      sync.RWMutex
 	client  GoCacheClientInterface
-	options *Options
+	options *lib_store.Options
 }
 
 // NewGoCache creates a new store to GoCache (memory) library instance
-func NewGoCache(client GoCacheClientInterface, options ...Option) *GoCacheStore {
+func NewGoCache(client GoCacheClientInterface, options ...lib_store.Option) *GoCacheStore {
 	return &GoCacheStore{
 		client:  client,
-		options: ApplyOptions(options...),
+		options: lib_store.ApplyOptions(options...),
 	}
 }
 
@@ -45,7 +47,7 @@ func (s *GoCacheStore) Get(_ context.Context, key any) (any, error) {
 	keyStr := key.(string)
 	value, exists := s.client.Get(keyStr)
 	if !exists {
-		err = NotFoundWithCause(errors.New("value not found in GoCache store"))
+		err = lib_store.NotFoundWithCause(errors.New("value not found in GoCache store"))
 	}
 
 	return value, err
@@ -55,15 +57,15 @@ func (s *GoCacheStore) Get(_ context.Context, key any) (any, error) {
 func (s *GoCacheStore) GetWithTTL(_ context.Context, key any) (any, time.Duration, error) {
 	data, t, exists := s.client.GetWithExpiration(key.(string))
 	if !exists {
-		return data, 0, NotFoundWithCause(errors.New("value not found in GoCache store"))
+		return data, 0, lib_store.NotFoundWithCause(errors.New("value not found in GoCache store"))
 	}
 	duration := time.Until(t)
 	return data, duration, nil
 }
 
 // Set defines data in GoCache memoey cache for given key identifier
-func (s *GoCacheStore) Set(ctx context.Context, key any, value any, options ...Option) error {
-	opts := ApplyOptions(options...)
+func (s *GoCacheStore) Set(ctx context.Context, key any, value any, options ...lib_store.Option) error {
+	opts := lib_store.ApplyOptions(options...)
 	if opts == nil {
 		opts = s.options
 	}
@@ -114,8 +116,8 @@ func (s *GoCacheStore) Delete(_ context.Context, key any) error {
 }
 
 // Invalidate invalidates some cache data in GoCache memoey cache for given options
-func (s *GoCacheStore) Invalidate(ctx context.Context, options ...InvalidateOption) error {
-	opts := ApplyInvalidateOptions(options...)
+func (s *GoCacheStore) Invalidate(ctx context.Context, options ...lib_store.InvalidateOption) error {
+	opts := lib_store.ApplyInvalidateOptions(options...)
if tags := opts.Tags; len(tags) > 0 { for _, tag := range tags { diff --git a/store/go_cache_bench_test.go b/store/go_cache/go_cache_bench_test.go similarity index 82% rename from store/go_cache_bench_test.go rename to store/go_cache/go_cache_bench_test.go index dd2c971..94e40d1 100644 --- a/store/go_cache_bench_test.go +++ b/store/go_cache/go_cache_bench_test.go @@ -1,4 +1,4 @@ -package store +package go_cache import ( "context" @@ -7,7 +7,8 @@ import ( "testing" "time" - "github.com/patrickmn/go-cache" + lib_store "github.com/eko/gocache/v4/lib/store" + cache "github.com/patrickmn/go-cache" ) func BenchmarkGoCacheSet(b *testing.B) { @@ -24,7 +25,7 @@ func BenchmarkGoCacheSet(b *testing.B) { key := fmt.Sprintf("test-%d", n) value := []byte(fmt.Sprintf("value-%d", n)) - store.Set(ctx, key, value, WithTags([]string{fmt.Sprintf("tag-%d", n)})) + store.Set(ctx, key, value, lib_store.WithTags([]string{fmt.Sprintf("tag-%d", n)})) } }) } diff --git a/test/mocks/store/clients/go_cache_interface.go b/store/go_cache/go_cache_mock.go similarity index 99% rename from test/mocks/store/clients/go_cache_interface.go rename to store/go_cache/go_cache_mock.go index e064ba7..e656e8a 100644 --- a/test/mocks/store/clients/go_cache_interface.go +++ b/store/go_cache/go_cache_mock.go @@ -2,7 +2,7 @@ // Source: store/go_cache.go // Package mocks is a generated GoMock package. -package mocks +package go_cache import ( reflect "reflect" diff --git a/store/go_cache_test.go b/store/go_cache/go_cache_test.go similarity index 78% rename from store/go_cache_test.go rename to store/go_cache/go_cache_test.go index 0527862..73c350d 100644 --- a/store/go_cache_test.go +++ b/store/go_cache/go_cache_test.go @@ -1,4 +1,4 @@ -package store +package go_cache import ( "context" @@ -6,7 +6,7 @@ import ( "testing" "time" - mocksStore "github.com/eko/gocache/v3/test/mocks/store/clients" + lib_store "github.com/eko/gocache/v4/lib/store" "github.com/golang/mock/gomock" "github.com/patrickmn/go-cache" "github.com/stretchr/testify/assert" @@ -16,14 +16,14 @@ func TestNewGoCache(t *testing.T) { // Given ctrl := gomock.NewController(t) - client := mocksStore.NewMockGoCacheClientInterface(ctrl) + client := NewMockGoCacheClientInterface(ctrl) // When - store := NewGoCache(client, WithCost(8)) + store := NewGoCache(client, lib_store.WithCost(8)) // Then assert.IsType(t, new(GoCacheStore), store) assert.Equal(t, client, store.client) - assert.Equal(t, &Options{Cost: 8}, store.options) + assert.Equal(t, &lib_store.Options{Cost: 8}, store.options) } func TestGoCacheGet(t *testing.T) { @@ -35,7 +35,7 @@ func TestGoCacheGet(t *testing.T) { cacheKey := "my-key" cacheValue := "my-cache-value" - client := mocksStore.NewMockGoCacheClientInterface(ctrl) + client := NewMockGoCacheClientInterface(ctrl) client.EXPECT().Get(cacheKey).Return(cacheValue, true) store := NewGoCache(client) @@ -56,7 +56,7 @@ func TestGoCacheGetWhenError(t *testing.T) { cacheKey := "my-key" - client := mocksStore.NewMockGoCacheClientInterface(ctrl) + client := NewMockGoCacheClientInterface(ctrl) client.EXPECT().Get(cacheKey).Return(nil, false) store := NewGoCache(client) @@ -66,7 +66,7 @@ func TestGoCacheGetWhenError(t *testing.T) { // Then assert.Nil(t, value) - assert.ErrorIs(t, err, NotFound{}) + assert.Error(t, err, lib_store.NotFound{}) } func TestGoCacheGetWithTTL(t *testing.T) { @@ -78,7 +78,7 @@ func TestGoCacheGetWithTTL(t *testing.T) { cacheKey := "my-key" cacheValue := "my-cache-value" - client := mocksStore.NewMockGoCacheClientInterface(ctrl) + client := 
NewMockGoCacheClientInterface(ctrl) client.EXPECT().GetWithExpiration(cacheKey).Return(cacheValue, time.Now(), true) store := NewGoCache(client) @@ -100,7 +100,7 @@ func TestGoCacheGetWithTTLWhenError(t *testing.T) { cacheKey := "my-key" - client := mocksStore.NewMockGoCacheClientInterface(ctrl) + client := NewMockGoCacheClientInterface(ctrl) client.EXPECT().GetWithExpiration(cacheKey).Return(nil, time.Now(), false) store := NewGoCache(client) @@ -110,7 +110,7 @@ func TestGoCacheGetWithTTLWhenError(t *testing.T) { // Then assert.Nil(t, value) - assert.ErrorIs(t, err, NotFound{}) + assert.Error(t, err, lib_store.NotFound{}) assert.Equal(t, 0*time.Second, ttl) } @@ -123,13 +123,13 @@ func TestGoCacheSet(t *testing.T) { cacheKey := "my-key" cacheValue := "my-cache-value" - client := mocksStore.NewMockGoCacheClientInterface(ctrl) + client := NewMockGoCacheClientInterface(ctrl) client.EXPECT().Set(cacheKey, cacheValue, 0*time.Second) store := NewGoCache(client) // When - err := store.Set(ctx, cacheKey, cacheValue, WithCost(4)) + err := store.Set(ctx, cacheKey, cacheValue, lib_store.WithCost(4)) // Then assert.Nil(t, err) @@ -144,7 +144,7 @@ func TestGoCacheSetWhenNoOptionsGiven(t *testing.T) { cacheKey := "my-key" cacheValue := "my-cache-value" - client := mocksStore.NewMockGoCacheClientInterface(ctrl) + client := NewMockGoCacheClientInterface(ctrl) client.EXPECT().Set(cacheKey, cacheValue, 0*time.Second) store := NewGoCache(client) @@ -165,7 +165,7 @@ func TestGoCacheSetWithTags(t *testing.T) { cacheKey := "my-key" cacheValue := []byte("my-cache-value") - client := mocksStore.NewMockGoCacheClientInterface(ctrl) + client := NewMockGoCacheClientInterface(ctrl) client.EXPECT().Set(cacheKey, cacheValue, 0*time.Second) client.EXPECT().Get("gocache_tag_tag1").Return(nil, true) cacheKeys := map[string]struct{}{"my-key": {}} @@ -174,7 +174,7 @@ func TestGoCacheSetWithTags(t *testing.T) { store := NewGoCache(client) // When - err := store.Set(ctx, cacheKey, cacheValue, WithTags([]string{"tag1"})) + err := store.Set(ctx, cacheKey, cacheValue, lib_store.WithTags([]string{"tag1"})) // Then assert.Nil(t, err) @@ -189,7 +189,7 @@ func TestGoCacheSetWithTagsWhenAlreadyInserted(t *testing.T) { cacheKey := "my-key" cacheValue := []byte("my-cache-value") - client := mocksStore.NewMockGoCacheClientInterface(ctrl) + client := NewMockGoCacheClientInterface(ctrl) client.EXPECT().Set(cacheKey, cacheValue, 0*time.Second) cacheKeys := map[string]struct{}{"my-key": {}, "a-second-key": {}} @@ -198,7 +198,7 @@ func TestGoCacheSetWithTagsWhenAlreadyInserted(t *testing.T) { store := NewGoCache(client) // When - err := store.Set(ctx, cacheKey, cacheValue, WithTags([]string{"tag1"})) + err := store.Set(ctx, cacheKey, cacheValue, lib_store.WithTags([]string{"tag1"})) // Then assert.Nil(t, err) @@ -212,7 +212,7 @@ func TestGoCacheDelete(t *testing.T) { cacheKey := "my-key" - client := mocksStore.NewMockGoCacheClientInterface(ctrl) + client := NewMockGoCacheClientInterface(ctrl) client.EXPECT().Delete(cacheKey) store := NewGoCache(client) @@ -232,7 +232,7 @@ func TestGoCacheInvalidate(t *testing.T) { cacheKeys := map[string]struct{}{"a23fdf987h2svc23": {}, "jHG2372x38hf74": {}} - client := mocksStore.NewMockGoCacheClientInterface(ctrl) + client := NewMockGoCacheClientInterface(ctrl) client.EXPECT().Get("gocache_tag_tag1").Return(cacheKeys, true) client.EXPECT().Delete("a23fdf987h2svc23") client.EXPECT().Delete("jHG2372x38hf74") @@ -240,7 +240,7 @@ func TestGoCacheInvalidate(t *testing.T) { store := NewGoCache(client) // When - 
err := store.Invalidate(ctx, WithInvalidateTags([]string{"tag1"})) + err := store.Invalidate(ctx, lib_store.WithInvalidateTags([]string{"tag1"})) // Then assert.Nil(t, err) @@ -254,13 +254,13 @@ func TestGoCacheInvalidateWhenError(t *testing.T) { cacheKeys := []byte("a23fdf987h2svc23,jHG2372x38hf74") - client := mocksStore.NewMockGoCacheClientInterface(ctrl) + client := NewMockGoCacheClientInterface(ctrl) client.EXPECT().Get("gocache_tag_tag1").Return(cacheKeys, false) store := NewGoCache(client) // When - err := store.Invalidate(ctx, WithInvalidateTags([]string{"tag1"})) + err := store.Invalidate(ctx, lib_store.WithInvalidateTags([]string{"tag1"})) // Then assert.Nil(t, err) @@ -272,7 +272,7 @@ func TestGoCacheClear(t *testing.T) { ctx := context.Background() - client := mocksStore.NewMockGoCacheClientInterface(ctrl) + client := NewMockGoCacheClientInterface(ctrl) client.EXPECT().Flush() store := NewGoCache(client) @@ -288,7 +288,7 @@ func TestGoCacheGetType(t *testing.T) { // Given ctrl := gomock.NewController(t) - client := mocksStore.NewMockGoCacheClientInterface(ctrl) + client := NewMockGoCacheClientInterface(ctrl) store := NewGoCache(client) @@ -310,7 +310,7 @@ func TestGoCacheSetTagsConcurrency(t *testing.T) { ctx, key, []string{"one", "two"}, - WithTags([]string{"tag1", "tag2", "tag3"}), + lib_store.WithTags([]string{"tag1", "tag2", "tag3"}), ) assert.Nil(t, err, err) }(i) @@ -333,12 +333,12 @@ func TestGoCacheInvalidateConcurrency(t *testing.T) { go func(i int) { key := fmt.Sprintf("%d", i) - err := store.Set(ctx, key, []string{"one", "two"}, WithTags(tags)) + err := store.Set(ctx, key, []string{"one", "two"}, lib_store.WithTags(tags)) assert.Nil(t, err, err) }(i) go func(i int) { - err := store.Invalidate(ctx, WithInvalidateTags([]string{fmt.Sprintf("tag%d", i)})) + err := store.Invalidate(ctx, lib_store.WithInvalidateTags([]string{fmt.Sprintf("tag%d", i)})) assert.Nil(t, err, err) }(i) diff --git a/store/memcache/go.mod b/store/memcache/go.mod new file mode 100644 index 0000000..f80c868 --- /dev/null +++ b/store/memcache/go.mod @@ -0,0 +1,20 @@ +module github.com/eko/gocache/v4/store/memcache + +go 1.19 + +require ( + github.com/bradfitz/gomemcache v0.0.0-20221031212613-62deef7fc822 + github.com/eko/gocache/v4/lib v0.0.0 + github.com/golang/mock v1.6.0 + github.com/stretchr/testify v1.8.1 + golang.org/x/sync v0.0.0-20210220032951-036812b2e83c +) + +require ( + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + golang.org/x/exp v0.0.0-20221126150942-6ab00d035af9 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect +) + +replace github.com/eko/gocache/v4/lib => ../../lib/ diff --git a/store/memcache/go.sum b/store/memcache/go.sum new file mode 100644 index 0000000..11b6042 --- /dev/null +++ b/store/memcache/go.sum @@ -0,0 +1,50 @@ +github.com/bradfitz/gomemcache v0.0.0-20221031212613-62deef7fc822 h1:hjXJeBcAMS1WGENGqDpzvmgS43oECTx8UXq31UBu0Jw= +github.com/bradfitz/gomemcache v0.0.0-20221031212613-62deef7fc822/go.mod h1:H0wQNHz2YrLsuXOZozoeDmnHXkNCRmMW0gwFWDfEZDA= +github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= 
+github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI= +github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/exp v0.0.0-20221126150942-6ab00d035af9 h1:yZNXmy+j/JpX19vZkVktWqAo7Gny4PBWYYK3zskGpx4= +golang.org/x/exp v0.0.0-20221126150942-6ab00d035af9/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors 
v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/store/memcache.go b/store/memcache/memcache.go similarity index 87% rename from store/memcache.go rename to store/memcache/memcache.go index 5f5ec59..835b96b 100644 --- a/store/memcache.go +++ b/store/memcache/memcache.go @@ -1,4 +1,4 @@ -package store +package memcache import ( "context" @@ -7,6 +7,7 @@ import ( "strings" "time" + lib_store "github.com/eko/gocache/v4/lib/store" "golang.org/x/sync/errgroup" "github.com/bradfitz/gomemcache/memcache" @@ -34,14 +35,14 @@ const ( // MemcacheStore is a store for Memcache type MemcacheStore struct { client MemcacheClientInterface - options *Options + options *lib_store.Options } // NewMemcache creates a new store to Memcache instance(s) -func NewMemcache(client MemcacheClientInterface, options ...Option) *MemcacheStore { +func NewMemcache(client MemcacheClientInterface, options ...lib_store.Option) *MemcacheStore { return &MemcacheStore{ client: client, - options: ApplyOptions(options...), + options: lib_store.ApplyOptions(options...), } } @@ -52,7 +53,7 @@ func (s *MemcacheStore) Get(_ context.Context, key any) (any, error) { return nil, err } if item == nil { - return nil, NotFoundWithCause(errors.New("unable to retrieve data from memcache")) + return nil, lib_store.NotFoundWithCause(errors.New("unable to retrieve data from memcache")) } return item.Value, err @@ -65,15 +66,15 @@ func (s *MemcacheStore) GetWithTTL(_ context.Context, key any) (any, time.Durati return nil, 0, err } if item == nil { - return nil, 0, NotFoundWithCause(errors.New("unable to retrieve data from memcache")) + return nil, 0, lib_store.NotFoundWithCause(errors.New("unable to retrieve data from memcache")) } return item.Value, time.Duration(item.Expiration) * time.Second, err } // Set defines data in Memcache for given key identifier -func (s *MemcacheStore) Set(ctx context.Context, key any, value any, options ...Option) error { - opts := ApplyOptionsWithDefault(s.options, options...) +func (s *MemcacheStore) Set(ctx context.Context, key any, value any, options ...lib_store.Option) error { + opts := lib_store.ApplyOptionsWithDefault(s.options, options...) item := &memcache.Item{ Key: key.(string), @@ -162,8 +163,8 @@ func (s *MemcacheStore) Delete(_ context.Context, key any) error { } // Invalidate invalidates some cache data in Memcache for given options -func (s *MemcacheStore) Invalidate(ctx context.Context, options ...InvalidateOption) error { - opts := ApplyInvalidateOptions(options...) +func (s *MemcacheStore) Invalidate(ctx context.Context, options ...lib_store.InvalidateOption) error { + opts := lib_store.ApplyInvalidateOptions(options...) 
if tags := opts.Tags; len(tags) > 0 { for _, tag := range tags { diff --git a/store/memcache_bench_test.go b/store/memcache/memcache_bench_test.go similarity index 78% rename from store/memcache_bench_test.go rename to store/memcache/memcache_bench_test.go index 58c7238..24d2bc2 100644 --- a/store/memcache_bench_test.go +++ b/store/memcache/memcache_bench_test.go @@ -1,4 +1,4 @@ -package store +package memcache import ( "context" @@ -8,6 +8,7 @@ import ( "time" "github.com/bradfitz/gomemcache/memcache" + lib_store "github.com/eko/gocache/v4/lib/store" ) func BenchmarkMemcacheSet(b *testing.B) { @@ -15,7 +16,7 @@ func BenchmarkMemcacheSet(b *testing.B) { store := NewMemcache( memcache.New("127.0.0.1:11211"), - WithExpiration(100*time.Second), + lib_store.WithExpiration(100*time.Second), ) for k := 0.; k <= 10; k++ { @@ -25,7 +26,7 @@ func BenchmarkMemcacheSet(b *testing.B) { key := fmt.Sprintf("test-%d", n) value := []byte(fmt.Sprintf("value-%d", n)) - store.Set(ctx, key, value, WithTags([]string{fmt.Sprintf("tag-%d", n)})) + store.Set(ctx, key, value, lib_store.WithTags([]string{fmt.Sprintf("tag-%d", n)})) } }) } @@ -36,7 +37,7 @@ func BenchmarkMemcacheGet(b *testing.B) { store := NewMemcache( memcache.New("127.0.0.1:11211"), - WithExpiration(100*time.Second), + lib_store.WithExpiration(100*time.Second), ) key := "test" diff --git a/test/mocks/store/clients/memcache_interface.go b/store/memcache/memcache_mock.go similarity index 99% rename from test/mocks/store/clients/memcache_interface.go rename to store/memcache/memcache_mock.go index 861b24e..281b66b 100644 --- a/test/mocks/store/clients/memcache_interface.go +++ b/store/memcache/memcache_mock.go @@ -2,7 +2,7 @@ // Source: store/memcache.go // Package mocks is a generated GoMock package. -package mocks +package memcache import ( reflect "reflect" diff --git a/store/memcache_test.go b/store/memcache/memcache_test.go similarity index 76% rename from store/memcache_test.go rename to store/memcache/memcache_test.go index cb382b6..5466bd0 100644 --- a/store/memcache_test.go +++ b/store/memcache/memcache_test.go @@ -1,4 +1,4 @@ -package store +package memcache import ( "context" @@ -7,7 +7,7 @@ import ( "time" "github.com/bradfitz/gomemcache/memcache" - mocksStore "github.com/eko/gocache/v3/test/mocks/store/clients" + lib_store "github.com/eko/gocache/v4/lib/store" "github.com/golang/mock/gomock" "github.com/stretchr/testify/assert" ) @@ -16,15 +16,15 @@ func TestNewMemcache(t *testing.T) { // Given ctrl := gomock.NewController(t) - client := mocksStore.NewMockMemcacheClientInterface(ctrl) + client := NewMockMemcacheClientInterface(ctrl) // When - store := NewMemcache(client, WithExpiration(3*time.Second)) + store := NewMemcache(client, lib_store.WithExpiration(3*time.Second)) // Then assert.IsType(t, new(MemcacheStore), store) assert.Equal(t, client, store.client) - assert.Equal(t, &Options{Expiration: 3 * time.Second}, store.options) + assert.Equal(t, &lib_store.Options{Expiration: 3 * time.Second}, store.options) } func TestMemcacheGet(t *testing.T) { @@ -36,12 +36,12 @@ func TestMemcacheGet(t *testing.T) { cacheKey := "my-key" cacheValue := []byte("my-cache-value") - client := mocksStore.NewMockMemcacheClientInterface(ctrl) + client := NewMockMemcacheClientInterface(ctrl) client.EXPECT().Get(cacheKey).Return(&memcache.Item{ Value: cacheValue, }, nil) - store := NewMemcache(client, WithExpiration(3*time.Second)) + store := NewMemcache(client, lib_store.WithExpiration(3*time.Second)) // When value, err := store.Get(ctx, cacheKey) @@ -61,10 
+61,10 @@ func TestMemcacheGetWhenError(t *testing.T) { expectedErr := errors.New("an unexpected error occurred") - client := mocksStore.NewMockMemcacheClientInterface(ctrl) + client := NewMockMemcacheClientInterface(ctrl) client.EXPECT().Get(cacheKey).Return(nil, expectedErr) - store := NewMemcache(client, WithExpiration(3*time.Second)) + store := NewMemcache(client, lib_store.WithExpiration(3*time.Second)) // When value, err := store.Get(ctx, cacheKey) @@ -83,13 +83,13 @@ func TestMemcacheGetWithTTL(t *testing.T) { cacheKey := "my-key" cacheValue := []byte("my-cache-value") - client := mocksStore.NewMockMemcacheClientInterface(ctrl) + client := NewMockMemcacheClientInterface(ctrl) client.EXPECT().Get(cacheKey).Return(&memcache.Item{ Value: cacheValue, Expiration: int32(5), }, nil) - store := NewMemcache(client, WithExpiration(3*time.Second)) + store := NewMemcache(client, lib_store.WithExpiration(3*time.Second)) // When value, ttl, err := store.GetWithTTL(ctx, cacheKey) @@ -108,10 +108,10 @@ func TestMemcacheGetWithTTLWhenMissingItem(t *testing.T) { cacheKey := "my-key" - client := mocksStore.NewMockMemcacheClientInterface(ctrl) + client := NewMockMemcacheClientInterface(ctrl) client.EXPECT().Get(cacheKey).Return(nil, nil) - store := NewMemcache(client, WithExpiration(3*time.Second)) + store := NewMemcache(client, lib_store.WithExpiration(3*time.Second)) // When value, ttl, err := store.GetWithTTL(ctx, cacheKey) @@ -132,10 +132,10 @@ func TestMemcacheGetWithTTLWhenError(t *testing.T) { expectedErr := errors.New("an unexpected error occurred") - client := mocksStore.NewMockMemcacheClientInterface(ctrl) + client := NewMockMemcacheClientInterface(ctrl) client.EXPECT().Get(cacheKey).Return(nil, expectedErr) - store := NewMemcache(client, WithExpiration(3*time.Second)) + store := NewMemcache(client, lib_store.WithExpiration(3*time.Second)) // When value, ttl, err := store.GetWithTTL(ctx, cacheKey) @@ -155,17 +155,17 @@ func TestMemcacheSet(t *testing.T) { cacheKey := "my-key" cacheValue := []byte("my-cache-value") - client := mocksStore.NewMockMemcacheClientInterface(ctrl) + client := NewMockMemcacheClientInterface(ctrl) client.EXPECT().Set(&memcache.Item{ Key: cacheKey, Value: cacheValue, Expiration: int32(5), }).Return(nil) - store := NewMemcache(client, WithExpiration(3*time.Second)) + store := NewMemcache(client, lib_store.WithExpiration(3*time.Second)) // When - err := store.Set(ctx, cacheKey, cacheValue, WithExpiration(5*time.Second)) + err := store.Set(ctx, cacheKey, cacheValue, lib_store.WithExpiration(5*time.Second)) // Then assert.Nil(t, err) @@ -180,14 +180,14 @@ func TestMemcacheSetWhenNoOptionsGiven(t *testing.T) { cacheKey := "my-key" cacheValue := []byte("my-cache-value") - client := mocksStore.NewMockMemcacheClientInterface(ctrl) + client := NewMockMemcacheClientInterface(ctrl) client.EXPECT().Set(&memcache.Item{ Key: cacheKey, Value: cacheValue, Expiration: int32(3), }).Return(nil) - store := NewMemcache(client, WithExpiration(3*time.Second)) + store := NewMemcache(client, lib_store.WithExpiration(3*time.Second)) // When err := store.Set(ctx, cacheKey, cacheValue) @@ -207,14 +207,14 @@ func TestMemcacheSetWhenError(t *testing.T) { expectedErr := errors.New("an unexpected error occurred") - client := mocksStore.NewMockMemcacheClientInterface(ctrl) + client := NewMockMemcacheClientInterface(ctrl) client.EXPECT().Set(&memcache.Item{ Key: cacheKey, Value: cacheValue, Expiration: int32(3), }).Return(expectedErr) - store := NewMemcache(client, WithExpiration(3*time.Second)) + store 
:= NewMemcache(client, lib_store.WithExpiration(3*time.Second)) // When err := store.Set(ctx, cacheKey, cacheValue) @@ -234,7 +234,7 @@ func TestMemcacheSetWithTags(t *testing.T) { tagKey := "gocache_tag_tag1" - client := mocksStore.NewMockMemcacheClientInterface(ctrl) + client := NewMockMemcacheClientInterface(ctrl) client.EXPECT().Set(gomock.Any()).AnyTimes().Return(nil) client.EXPECT().Get(tagKey).Return(nil, memcache.ErrCacheMiss) client.EXPECT().Add(&memcache.Item{ @@ -246,7 +246,7 @@ func TestMemcacheSetWithTags(t *testing.T) { store := NewMemcache(client) // When - err := store.Set(ctx, cacheKey, cacheValue, WithTags([]string{"tag1"})) + err := store.Set(ctx, cacheKey, cacheValue, lib_store.WithTags([]string{"tag1"})) // Then assert.Nil(t, err) @@ -261,7 +261,7 @@ func TestMemcacheSetWithTagsWhenAlreadyInserted(t *testing.T) { cacheKey := "my-key" cacheValue := []byte("my-cache-value") - client := mocksStore.NewMockMemcacheClientInterface(ctrl) + client := NewMockMemcacheClientInterface(ctrl) client.EXPECT().Set(gomock.Any()).AnyTimes().Return(nil) client.EXPECT().Get("gocache_tag_tag1").Return(&memcache.Item{ Value: []byte("my-key,a-second-key"), @@ -270,7 +270,7 @@ func TestMemcacheSetWithTagsWhenAlreadyInserted(t *testing.T) { store := NewMemcache(client) // When - err := store.Set(ctx, cacheKey, cacheValue, WithTags([]string{"tag1"})) + err := store.Set(ctx, cacheKey, cacheValue, lib_store.WithTags([]string{"tag1"})) // Then assert.Nil(t, err) @@ -284,7 +284,7 @@ func TestMemcacheDelete(t *testing.T) { cacheKey := "my-key" - client := mocksStore.NewMockMemcacheClientInterface(ctrl) + client := NewMockMemcacheClientInterface(ctrl) client.EXPECT().Delete(cacheKey).Return(nil) store := NewMemcache(client) @@ -306,7 +306,7 @@ func TestMemcacheDeleteWhenError(t *testing.T) { cacheKey := "my-key" - client := mocksStore.NewMockMemcacheClientInterface(ctrl) + client := NewMockMemcacheClientInterface(ctrl) client.EXPECT().Delete(cacheKey).Return(expectedErr) store := NewMemcache(client) @@ -328,7 +328,7 @@ func TestMemcacheInvalidate(t *testing.T) { Value: []byte("a23fdf987h2svc23,jHG2372x38hf74"), } - client := mocksStore.NewMockMemcacheClientInterface(ctrl) + client := NewMockMemcacheClientInterface(ctrl) client.EXPECT().Get("gocache_tag_tag1").Return(cacheKeys, nil) client.EXPECT().Delete("a23fdf987h2svc23").Return(nil) client.EXPECT().Delete("jHG2372x38hf74").Return(nil) @@ -336,7 +336,7 @@ func TestMemcacheInvalidate(t *testing.T) { store := NewMemcache(client) // When - err := store.Invalidate(ctx, WithInvalidateTags([]string{"tag1"})) + err := store.Invalidate(ctx, lib_store.WithInvalidateTags([]string{"tag1"})) // Then assert.Nil(t, err) @@ -352,7 +352,7 @@ func TestMemcacheInvalidateWhenError(t *testing.T) { Value: []byte("a23fdf987h2svc23,jHG2372x38hf74"), } - client := mocksStore.NewMockMemcacheClientInterface(ctrl) + client := NewMockMemcacheClientInterface(ctrl) client.EXPECT().Get("gocache_tag_tag1").Return(cacheKeys, nil) client.EXPECT().Delete("a23fdf987h2svc23").Return(errors.New("unexpected error")) client.EXPECT().Delete("jHG2372x38hf74").Return(nil) @@ -360,7 +360,7 @@ func TestMemcacheInvalidateWhenError(t *testing.T) { store := NewMemcache(client) // When - err := store.Invalidate(ctx, WithInvalidateTags([]string{"tag1"})) + err := store.Invalidate(ctx, lib_store.WithInvalidateTags([]string{"tag1"})) // Then assert.Nil(t, err) @@ -372,7 +372,7 @@ func TestMemcacheClear(t *testing.T) { ctx := context.Background() - client := 
mocksStore.NewMockMemcacheClientInterface(ctrl) + client := NewMockMemcacheClientInterface(ctrl) client.EXPECT().FlushAll().Return(nil) store := NewMemcache(client) @@ -392,7 +392,7 @@ func TestMemcacheClearWhenError(t *testing.T) { expectedErr := errors.New("an unexpected error occurred") - client := mocksStore.NewMockMemcacheClientInterface(ctrl) + client := NewMockMemcacheClientInterface(ctrl) client.EXPECT().FlushAll().Return(expectedErr) store := NewMemcache(client) @@ -408,7 +408,7 @@ func TestMemcacheGetType(t *testing.T) { // Given ctrl := gomock.NewController(t) - client := mocksStore.NewMockMemcacheClientInterface(ctrl) + client := NewMockMemcacheClientInterface(ctrl) store := NewMemcache(client) diff --git a/store/pegasus/go.mod b/store/pegasus/go.mod new file mode 100644 index 0000000..9b2213f --- /dev/null +++ b/store/pegasus/go.mod @@ -0,0 +1,29 @@ +module github.com/eko/gocache/v4/store/pegasus + +go 1.19 + +require ( + github.com/XiaoMi/pegasus-go-client v0.0.0-20220519103347-ba0e68465cd5 + github.com/eko/gocache/v4/lib v0.0.0 + github.com/smartystreets/assertions v1.13.0 + github.com/smartystreets/goconvey v1.7.2 + github.com/spf13/cast v1.5.0 +) + +require ( + github.com/cenkalti/backoff/v4 v4.1.0 // indirect + github.com/golang/mock v1.6.0 // indirect + github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 // indirect + github.com/jtolds/gls v4.20.0+incompatible // indirect + github.com/konsorten/go-windows-terminal-sequences v1.0.1 // indirect + github.com/pegasus-kv/thrift v0.13.0 // indirect + github.com/sirupsen/logrus v1.4.2 // indirect + golang.org/x/exp v0.0.0-20221126150942-6ab00d035af9 // indirect + golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect + golang.org/x/sys v0.1.0 // indirect + gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect + gopkg.in/tomb.v2 v2.0.0-20161208151619-d5d1b5820637 // indirect + k8s.io/apimachinery v0.0.0-20191123233150-4c4803ed55e3 // indirect +) + +replace github.com/eko/gocache/v4/lib => ../../lib/ diff --git a/store/pegasus/go.sum b/store/pegasus/go.sum new file mode 100644 index 0000000..628f953 --- /dev/null +++ b/store/pegasus/go.sum @@ -0,0 +1,166 @@ +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= +github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/XiaoMi/pegasus-go-client v0.0.0-20220519103347-ba0e68465cd5 h1:mePTwVjIZ65Ko7gK9MWh/xV/ubEe9yns69Slzr4B1aI= +github.com/XiaoMi/pegasus-go-client v0.0.0-20220519103347-ba0e68465cd5/go.mod h1:VrfgKISflRhFm32m3e0SXLccvNJTyG8PRywWbUuGEfY= +github.com/agiledragon/gomonkey v2.0.2+incompatible h1:eXKi9/piiC3cjJD1658mEE2o3NjkJ5vDLgYjCQu0Xlw= +github.com/agiledragon/gomonkey v2.0.2+incompatible/go.mod h1:2NGfXu1a80LLr2cmWXGBDaHEjb1idR6+FVlX5T3D9hw= +github.com/cenkalti/backoff/v4 v4.1.0 h1:c8LkOFQTzuO0WBM/ae5HdGQuZPfPxp7lqBRwQRm4fSc= +github.com/cenkalti/backoff/v4 v4.1.0/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= +github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew 
v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= +github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= +github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= +github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= +github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= +github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= +github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= +github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= +github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= +github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= +github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI= +github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= +github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= +github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/hashicorp/golang-lru v0.5.1/go.mod 
h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/pegasus-kv/thrift v0.13.0 h1:4ESwaNoHImfbHa9RUGJiJZ4hrxorihZHk5aarYwY8d4= +github.com/pegasus-kv/thrift v0.13.0/go.mod h1:Gl9NT/WHG6ABm6NsrbfE8LiJN0sAyneCrvB4qN4NPqQ= +github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k= +github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/smartystreets/assertions v1.2.0/go.mod 
h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo= +github.com/smartystreets/assertions v1.13.0 h1:Dx1kYM01xsSqKPno3aqLnrwac2LetPvN23diwyr69Qs= +github.com/smartystreets/assertions v1.13.0/go.mod h1:wDmR7qL282YbGsPy6H/yAsesrxfxaaSlJazyFLYVFx8= +github.com/smartystreets/goconvey v1.7.2 h1:9RBaZCeXEQ3UselpuwUQHltGVXvdwm6cv1hgR6gDIPg= +github.com/smartystreets/goconvey v1.7.2/go.mod h1:Vw0tHAZW6lzCRk3xgdin6fKYcG+G3Pg9vgXWeJpQFMM= +github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= +github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= +github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/exp v0.0.0-20221126150942-6ab00d035af9 h1:yZNXmy+j/JpX19vZkVktWqAo7Gny4PBWYYK3zskGpx4= +golang.org/x/exp v0.0.0-20221126150942-6ab00d035af9/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191105084925-a882066a44e0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 
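A minimal usage sketch for the relocated Pegasus store, assuming an already-initialised *PegasusStore (its constructor is not part of this hunk); it only exercises the option helpers that appear in this diff (lib_store.WithExpiration, lib_store.WithTags, lib_store.WithInvalidateTags):

// Illustrative only; not taken from the repository.
package example

import (
	"context"
	"time"

	lib_store "github.com/eko/gocache/v4/lib/store"
	"github.com/eko/gocache/v4/store/pegasus"
)

func usePegasusStore(ctx context.Context, p *pegasus.PegasusStore) error {
	// Write a value with a TTL and a tag.
	if err := p.Set(ctx, "my-key", []byte("my-value"),
		lib_store.WithExpiration(10*time.Minute),
		lib_store.WithTags([]string{"tag1"})); err != nil {
		return err
	}

	// Read it back together with its remaining TTL.
	if _, _, err := p.GetWithTTL(ctx, "my-key"); err != nil {
		return err
	}

	// Drop every key referenced by the tag.
	return p.Invalidate(ctx, lib_store.WithInvalidateTags([]string{"tag1"}))
}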
+golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0 h1:kunALQeHf1/185U1i0GOB/fy1IPRDDpuoOOqRReG57U= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8= +gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/tomb.v2 v2.0.0-20161208151619-d5d1b5820637 h1:yiW+nvdHb9LVqSHQBXfZCieqV4fzYhNBql77zY0ykqs= +gopkg.in/tomb.v2 v2.0.0-20161208151619-d5d1b5820637/go.mod h1:BHsqpu/nsuzkT5BpiH1EMZPLyqSMM8JbIavyFACoFNk= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod 
h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +k8s.io/apimachinery v0.0.0-20191123233150-4c4803ed55e3 h1:FErmbNIJruD5GT2oVEjtPn5Ar5+rcWJsC8/PPUkR0s4= +k8s.io/apimachinery v0.0.0-20191123233150-4c4803ed55e3/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg= +k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= +k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= +sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI= +sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= diff --git a/store/pegasus.go b/store/pegasus/pegasus.go similarity index 94% rename from store/pegasus.go rename to store/pegasus/pegasus.go index f33bcf8..31ee3d8 100644 --- a/store/pegasus.go +++ b/store/pegasus/pegasus.go @@ -1,4 +1,4 @@ -package store +package pegasus import ( "context" @@ -10,6 +10,8 @@ import ( "github.com/XiaoMi/pegasus-go-client/admin" "github.com/XiaoMi/pegasus-go-client/pegasus" "github.com/spf13/cast" + + lib_store "github.com/eko/gocache/v4/lib/store" ) const ( @@ -31,7 +33,7 @@ var empty = []byte("-") // OptionsPegasus is options of Pegasus type OptionsPegasus struct { - *Options + *lib_store.Options MetaServers []string TableName string @@ -138,7 +140,7 @@ func (p *PegasusStore) Get(ctx context.Context, key any) (any, error) { return nil, err } if value == nil { - return nil, &NotFound{} + return nil, &lib_store.NotFound{} } return value, nil } @@ -156,7 +158,7 @@ func (p *PegasusStore) GetWithTTL(ctx context.Context, key any) (any, time.Durat return nil, 0, err } if value == nil { - return nil, 0, &NotFound{} + return nil, 0, &lib_store.NotFound{} } ttl, err := table.TTL(ctx, []byte(cast.ToString(key)), empty) @@ -168,8 +170,8 @@ func (p *PegasusStore) GetWithTTL(ctx context.Context, key any) (any, time.Durat } // Set defines data in Pegasus for given key identifier -func (p *PegasusStore) Set(ctx context.Context, key, value any, options ...Option) error { - opts := ApplyOptions(options...) +func (p *PegasusStore) Set(ctx context.Context, key, value any, options ...lib_store.Option) error { + opts := lib_store.ApplyOptions(options...) table, err := p.client.OpenTable(ctx, p.options.TableName) if err != nil { @@ -213,7 +215,7 @@ func (p *PegasusStore) setTags(ctx context.Context, key any, tags []string) erro cacheKeys = append(cacheKeys, key.(string)) } - if err := p.Set(ctx, tagKey, []byte(strings.Join(cacheKeys, ",")), WithExpiration(720*time.Hour)); err != nil { + if err := p.Set(ctx, tagKey, []byte(strings.Join(cacheKeys, ",")), lib_store.WithExpiration(720*time.Hour)); err != nil { return err } } @@ -233,8 +235,8 @@ func (p *PegasusStore) Delete(ctx context.Context, key any) error { } // Invalidate invalidates some cache data in Pegasus for given options -func (p *PegasusStore) Invalidate(ctx context.Context, options ...InvalidateOption) error { - opts := ApplyInvalidateOptions(options...) 
+func (p *PegasusStore) Invalidate(ctx context.Context, options ...lib_store.InvalidateOption) error { + opts := lib_store.ApplyInvalidateOptions(options...) if tags := opts.Tags; len(tags) > 0 { for _, tag := range tags { tagKey := fmt.Sprintf(PegasusTagPattern, tag) diff --git a/store/pegasus_bench_test.go b/store/pegasus/pegasus_bench_test.go similarity index 85% rename from store/pegasus_bench_test.go rename to store/pegasus/pegasus_bench_test.go index f054870..5f168de 100644 --- a/store/pegasus_bench_test.go +++ b/store/pegasus/pegasus_bench_test.go @@ -1,10 +1,12 @@ -package store +package pegasus import ( "context" "fmt" "math" "testing" + + lib_store "github.com/eko/gocache/v4/lib/store" ) // run go test -bench='BenchmarkPegasusStore*' -benchtime=1s -count=1 -run=none @@ -21,7 +23,7 @@ func BenchmarkPegasusStore_Set(b *testing.B) { key := fmt.Sprintf("test-%d", n) value := []byte(fmt.Sprintf("value-%d", n)) - p.Set(ctx, key, value, WithTags([]string{fmt.Sprintf("tag-%d", n)})) + p.Set(ctx, key, value, lib_store.WithTags([]string{fmt.Sprintf("tag-%d", n)})) } }) } diff --git a/store/pegasus_test.go b/store/pegasus/pegasus_test.go similarity index 96% rename from store/pegasus_test.go rename to store/pegasus/pegasus_test.go index f7c48c3..afd54be 100644 --- a/store/pegasus_test.go +++ b/store/pegasus/pegasus_test.go @@ -1,10 +1,11 @@ -package store +package pegasus import ( "context" "testing" "time" + lib_store "github.com/eko/gocache/v4/lib/store" "github.com/smartystreets/assertions/should" . "github.com/smartystreets/goconvey/convey" "github.com/spf13/cast" @@ -108,7 +109,7 @@ func TestPegasusStore_GetWithTTL(t *testing.T) { Convey("test set ttl that not achieve", func() { k, v, retention := "test-gocache-key-01", "test-gocache-value", time.Minute*10 - p.Set(ctx, k, v, WithExpiration(retention)) + p.Set(ctx, k, v, lib_store.WithExpiration(retention)) value, ttl, err := p.GetWithTTL(ctx, k) So(cast.ToString(value), ShouldEqual, v) @@ -126,7 +127,7 @@ func TestPegasusStore_GetWithTTL(t *testing.T) { }) Convey("test set ttl that already achieve", func() { k, v, retention := "test-gocache-key-03", "test-gocache-value", time.Millisecond*10 - p.Set(ctx, k, v, WithExpiration(retention)) + p.Set(ctx, k, v, lib_store.WithExpiration(retention)) time.Sleep(time.Second * 1) value, ttl, err := p.GetWithTTL(ctx, k) diff --git a/store/redis/go.mod b/store/redis/go.mod new file mode 100644 index 0000000..7eda05e --- /dev/null +++ b/store/redis/go.mod @@ -0,0 +1,21 @@ +module github.com/eko/gocache/v4/store/redis + +go 1.19 + +require ( + github.com/eko/gocache/v4/lib v0.0.0 + github.com/go-redis/redis/v8 v8.11.5 + github.com/golang/mock v1.6.0 + github.com/stretchr/testify v1.8.1 +) + +require ( + github.com/cespare/xxhash/v2 v2.1.2 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + golang.org/x/exp v0.0.0-20221126150942-6ab00d035af9 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect +) + +replace github.com/eko/gocache/v4/lib => ../../lib/ diff --git a/store/redis/go.sum b/store/redis/go.sum new file mode 100644 index 0000000..bb37b6c --- /dev/null +++ b/store/redis/go.sum @@ -0,0 +1,59 @@ +github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= +github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= +github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= +github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI= +github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo= +github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= +github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= +github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= +github.com/onsi/gomega v1.18.1 h1:M1GfJqGRrBrrGGsbxzV5dqM2U2ApXefZCQpkukxYRLE= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/exp v0.0.0-20221126150942-6ab00d035af9 h1:yZNXmy+j/JpX19vZkVktWqAo7Gny4PBWYYK3zskGpx4= +golang.org/x/exp v0.0.0-20221126150942-6ab00d035af9/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
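A hedged sketch of how an application might wire the relocated Redis store once this split lands; NewRedis, Set, Invalidate and the lib_store option helpers match the signatures shown in this diff, while the go-redis v8 client construction (redis.NewClient with a local address) is illustrative rather than taken from this repository:

// Illustrative only; assumes *redis.Client satisfies RedisClientInterface as in the tests below.
package example

import (
	"context"
	"time"

	"github.com/go-redis/redis/v8"

	lib_store "github.com/eko/gocache/v4/lib/store"
	redis_store "github.com/eko/gocache/v4/store/redis"
)

func wireRedisStore(ctx context.Context) error {
	// Plain go-redis v8 client; the address is an example value.
	client := redis.NewClient(&redis.Options{Addr: "127.0.0.1:6379"})

	// Store-level default expiration; per-call options can still override it.
	store := redis_store.NewRedis(client, lib_store.WithExpiration(6*time.Second))

	if err := store.Set(ctx, "my-key", "my-value",
		lib_store.WithExpiration(5*time.Second),
		lib_store.WithTags([]string{"tag1"})); err != nil {
		return err
	}

	// Invalidate everything tagged "tag1".
	return store.Invalidate(ctx, lib_store.WithInvalidateTags([]string{"tag1"}))
}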
+golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0 h1:kunALQeHf1/185U1i0GOB/fy1IPRDDpuoOOqRReG57U= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/store/redis.go b/store/redis/redis.go similarity index 86% rename from store/redis.go rename to store/redis/redis.go index 3ba703d..34e3608 100644 --- a/store/redis.go +++ b/store/redis/redis.go @@ -1,10 +1,11 @@ -package store +package redis import ( "context" "fmt" "time" + lib_store "github.com/eko/gocache/v4/lib/store" "github.com/go-redis/redis/v8" ) @@ -30,14 +31,14 @@ const ( // RedisStore is a store for Redis type RedisStore struct { client RedisClientInterface - options *Options + options *lib_store.Options } // NewRedis creates a new store to Redis instance(s) -func NewRedis(client RedisClientInterface, options ...Option) *RedisStore { +func NewRedis(client RedisClientInterface, options ...lib_store.Option) *RedisStore { return &RedisStore{ client: client, - options: ApplyOptions(options...), + options: lib_store.ApplyOptions(options...), } } @@ -45,7 +46,7 @@ func NewRedis(client RedisClientInterface, options ...Option) *RedisStore { func (s *RedisStore) Get(ctx context.Context, key any) (any, error) { object, err := s.client.Get(ctx, key.(string)).Result() if err == redis.Nil { - return nil, NotFoundWithCause(err) + return nil, lib_store.NotFoundWithCause(err) } return object, err } @@ -54,7 +55,7 @@ func (s *RedisStore) Get(ctx context.Context, key any) (any, error) { func (s *RedisStore) GetWithTTL(ctx context.Context, key any) (any, time.Duration, error) { object, err := s.client.Get(ctx, key.(string)).Result() if err == redis.Nil { - return nil, 0, NotFoundWithCause(err) + return nil, 0, lib_store.NotFoundWithCause(err) } if err != nil { return nil, 0, err @@ -69,8 +70,8 @@ func (s *RedisStore) GetWithTTL(ctx context.Context, key any) (any, time.Duratio } // Set defines data in Redis 
for given key identifier -func (s *RedisStore) Set(ctx context.Context, key any, value any, options ...Option) error { - opts := ApplyOptionsWithDefault(s.options, options...) +func (s *RedisStore) Set(ctx context.Context, key any, value any, options ...lib_store.Option) error { + opts := lib_store.ApplyOptionsWithDefault(s.options, options...) err := s.client.Set(ctx, key.(string), value, opts.Expiration).Err() if err != nil { @@ -99,8 +100,8 @@ func (s *RedisStore) Delete(ctx context.Context, key any) error { } // Invalidate invalidates some cache data in Redis for given options -func (s *RedisStore) Invalidate(ctx context.Context, options ...InvalidateOption) error { - opts := ApplyInvalidateOptions(options...) +func (s *RedisStore) Invalidate(ctx context.Context, options ...lib_store.InvalidateOption) error { + opts := lib_store.ApplyInvalidateOptions(options...) if tags := opts.Tags; len(tags) > 0 { for _, tag := range tags { diff --git a/store/redis_bench_test.go b/store/redis/redis_bench_test.go similarity index 85% rename from store/redis_bench_test.go rename to store/redis/redis_bench_test.go index f91d7ba..c9fb8b2 100644 --- a/store/redis_bench_test.go +++ b/store/redis/redis_bench_test.go @@ -1,4 +1,4 @@ -package store +package redis import ( "context" @@ -6,6 +6,7 @@ import ( "math" "testing" + lib_store "github.com/eko/gocache/v4/lib/store" "github.com/go-redis/redis/v8" ) @@ -23,7 +24,7 @@ func BenchmarkRedisSet(b *testing.B) { key := fmt.Sprintf("test-%d", n) value := []byte(fmt.Sprintf("value-%d", n)) - store.Set(ctx, key, value, WithTags([]string{fmt.Sprintf("tag-%d", n)})) + store.Set(ctx, key, value, lib_store.WithTags([]string{fmt.Sprintf("tag-%d", n)})) } }) } diff --git a/test/mocks/store/clients/redis_interface.go b/store/redis/redis_mock.go similarity index 99% rename from test/mocks/store/clients/redis_interface.go rename to store/redis/redis_mock.go index 9d693a8..eb7b996 100644 --- a/test/mocks/store/clients/redis_interface.go +++ b/store/redis/redis_mock.go @@ -2,7 +2,7 @@ // Source: store/redis.go // Package mocks is a generated GoMock package. 
-package mocks +package redis import ( context "context" diff --git a/store/redis_test.go b/store/redis/redis_test.go similarity index 73% rename from store/redis_test.go rename to store/redis/redis_test.go index 9877097..f8ae497 100644 --- a/store/redis_test.go +++ b/store/redis/redis_test.go @@ -1,29 +1,30 @@ -package store +package redis import ( "context" "testing" "time" - mocksStore "github.com/eko/gocache/v3/test/mocks/store/clients" "github.com/go-redis/redis/v8" "github.com/golang/mock/gomock" "github.com/stretchr/testify/assert" + + lib_store "github.com/eko/gocache/v4/lib/store" ) func TestNewRedis(t *testing.T) { // Given ctrl := gomock.NewController(t) - client := mocksStore.NewMockRedisClientInterface(ctrl) + client := NewMockRedisClientInterface(ctrl) // When - store := NewRedis(client, WithExpiration(6*time.Second)) + store := NewRedis(client, lib_store.WithExpiration(6*time.Second)) // Then assert.IsType(t, new(RedisStore), store) assert.Equal(t, client, store.client) - assert.Equal(t, &Options{Expiration: 6 * time.Second}, store.options) + assert.Equal(t, &lib_store.Options{Expiration: 6 * time.Second}, store.options) } func TestRedisGet(t *testing.T) { @@ -32,7 +33,7 @@ func TestRedisGet(t *testing.T) { ctx := context.Background() - client := mocksStore.NewMockRedisClientInterface(ctrl) + client := NewMockRedisClientInterface(ctrl) client.EXPECT().Get(ctx, "my-key").Return(&redis.StringCmd{}) store := NewRedis(client) @@ -54,13 +55,13 @@ func TestRedisSet(t *testing.T) { cacheKey := "my-key" cacheValue := "my-cache-value" - client := mocksStore.NewMockRedisClientInterface(ctrl) + client := NewMockRedisClientInterface(ctrl) client.EXPECT().Set(ctx, "my-key", cacheValue, 5*time.Second).Return(&redis.StatusCmd{}) - store := NewRedis(client, WithExpiration(6*time.Second)) + store := NewRedis(client, lib_store.WithExpiration(6*time.Second)) // When - err := store.Set(ctx, cacheKey, cacheValue, WithExpiration(5*time.Second)) + err := store.Set(ctx, cacheKey, cacheValue, lib_store.WithExpiration(5*time.Second)) // Then assert.Nil(t, err) @@ -75,10 +76,10 @@ func TestRedisSetWhenNoOptionsGiven(t *testing.T) { cacheKey := "my-key" cacheValue := "my-cache-value" - client := mocksStore.NewMockRedisClientInterface(ctrl) + client := NewMockRedisClientInterface(ctrl) client.EXPECT().Set(ctx, "my-key", cacheValue, 6*time.Second).Return(&redis.StatusCmd{}) - store := NewRedis(client, WithExpiration(6*time.Second)) + store := NewRedis(client, lib_store.WithExpiration(6*time.Second)) // When err := store.Set(ctx, cacheKey, cacheValue) @@ -96,7 +97,7 @@ func TestRedisSetWithTags(t *testing.T) { cacheKey := "my-key" cacheValue := "my-cache-value" - client := mocksStore.NewMockRedisClientInterface(ctrl) + client := NewMockRedisClientInterface(ctrl) client.EXPECT().Set(ctx, cacheKey, cacheValue, time.Duration(0)).Return(&redis.StatusCmd{}) client.EXPECT().SAdd(ctx, "gocache_tag_tag1", "my-key").Return(&redis.IntCmd{}) client.EXPECT().Expire(ctx, "gocache_tag_tag1", 720*time.Hour).Return(&redis.BoolCmd{}) @@ -104,7 +105,7 @@ func TestRedisSetWithTags(t *testing.T) { store := NewRedis(client) // When - err := store.Set(ctx, cacheKey, cacheValue, WithTags([]string{"tag1"})) + err := store.Set(ctx, cacheKey, cacheValue, lib_store.WithTags([]string{"tag1"})) // Then assert.Nil(t, err) @@ -118,7 +119,7 @@ func TestRedisDelete(t *testing.T) { cacheKey := "my-key" - client := mocksStore.NewMockRedisClientInterface(ctrl) + client := NewMockRedisClientInterface(ctrl) client.EXPECT().Del(ctx, 
"my-key").Return(&redis.IntCmd{}) store := NewRedis(client) @@ -138,14 +139,14 @@ func TestRedisInvalidate(t *testing.T) { cacheKeys := &redis.StringSliceCmd{} - client := mocksStore.NewMockRedisClientInterface(ctrl) + client := NewMockRedisClientInterface(ctrl) client.EXPECT().SMembers(ctx, "gocache_tag_tag1").Return(cacheKeys) client.EXPECT().Del(ctx, "gocache_tag_tag1").Return(&redis.IntCmd{}) store := NewRedis(client) // When - err := store.Invalidate(ctx, WithInvalidateTags([]string{"tag1"})) + err := store.Invalidate(ctx, lib_store.WithInvalidateTags([]string{"tag1"})) // Then assert.Nil(t, err) @@ -157,7 +158,7 @@ func TestRedisClear(t *testing.T) { ctx := context.Background() - client := mocksStore.NewMockRedisClientInterface(ctrl) + client := NewMockRedisClientInterface(ctrl) client.EXPECT().FlushAll(ctx).Return(&redis.StatusCmd{}) store := NewRedis(client) @@ -173,7 +174,7 @@ func TestRedisGetType(t *testing.T) { // Given ctrl := gomock.NewController(t) - client := mocksStore.NewMockRedisClientInterface(ctrl) + client := NewMockRedisClientInterface(ctrl) store := NewRedis(client) diff --git a/store/rediscluster/go.mod b/store/rediscluster/go.mod new file mode 100644 index 0000000..3815e5f --- /dev/null +++ b/store/rediscluster/go.mod @@ -0,0 +1,21 @@ +module github.com/eko/gocache/v4/store/rediscluster + +go 1.19 + +require ( + github.com/eko/gocache/v4/lib v0.0.0 + github.com/go-redis/redis/v8 v8.11.5 + github.com/golang/mock v1.6.0 + github.com/stretchr/testify v1.8.1 +) + +require ( + github.com/cespare/xxhash/v2 v2.1.2 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + golang.org/x/exp v0.0.0-20221126150942-6ab00d035af9 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect +) + +replace github.com/eko/gocache/v4/lib => ../../lib/ diff --git a/store/rediscluster/go.sum b/store/rediscluster/go.sum new file mode 100644 index 0000000..bb37b6c --- /dev/null +++ b/store/rediscluster/go.sum @@ -0,0 +1,59 @@ +github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= +github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= +github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= +github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI= +github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo= +github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= +github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= +github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= +github.com/onsi/gomega v1.18.1 h1:M1GfJqGRrBrrGGsbxzV5dqM2U2ApXefZCQpkukxYRLE= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib 
v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/exp v0.0.0-20221126150942-6ab00d035af9 h1:yZNXmy+j/JpX19vZkVktWqAo7Gny4PBWYYK3zskGpx4= +golang.org/x/exp v0.0.0-20221126150942-6ab00d035af9/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0 h1:kunALQeHf1/185U1i0GOB/fy1IPRDDpuoOOqRReG57U= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gopkg.in/check.v1 
v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/store/rediscluster.go b/store/rediscluster/rediscluster.go similarity index 84% rename from store/rediscluster.go rename to store/rediscluster/rediscluster.go index 600ac77..69de999 100644 --- a/store/rediscluster.go +++ b/store/rediscluster/rediscluster.go @@ -1,10 +1,11 @@ -package store +package rediscluster import ( "context" "fmt" "time" + lib_store "github.com/eko/gocache/v4/lib/store" "github.com/go-redis/redis/v8" ) @@ -30,14 +31,14 @@ const ( // RedisStore is a store for Redis type RedisClusterStore struct { clusclient RedisClusterClientInterface - options *Options + options *lib_store.Options } // NewRedis creates a new store to Redis instance(s) -func NewRedisCluster(client RedisClusterClientInterface, options ...Option) *RedisClusterStore { +func NewRedisCluster(client RedisClusterClientInterface, options ...lib_store.Option) *RedisClusterStore { return &RedisClusterStore{ clusclient: client, - options: ApplyOptions(options...), + options: lib_store.ApplyOptions(options...), } } @@ -45,7 +46,7 @@ func NewRedisCluster(client RedisClusterClientInterface, options ...Option) *Red func (s *RedisClusterStore) Get(ctx context.Context, key any) (any, error) { object, err := s.clusclient.Get(ctx, key.(string)).Result() if err == redis.Nil { - return nil, NotFoundWithCause(err) + return nil, lib_store.NotFoundWithCause(err) } return object, err } @@ -54,7 +55,7 @@ func (s *RedisClusterStore) Get(ctx context.Context, key any) (any, error) { func (s *RedisClusterStore) GetWithTTL(ctx context.Context, key any) (any, time.Duration, error) { object, err := s.clusclient.Get(ctx, key.(string)).Result() if err == redis.Nil { - return nil, 0, NotFoundWithCause(err) + return nil, 0, lib_store.NotFoundWithCause(err) } if err != nil { return nil, 0, err @@ -69,8 +70,8 @@ func (s *RedisClusterStore) GetWithTTL(ctx context.Context, key any) (any, time. } // Set defines data in Redis for given key identifier -func (s *RedisClusterStore) Set(ctx context.Context, key any, value any, options ...Option) error { - opts := ApplyOptionsWithDefault(s.options, options...) +func (s *RedisClusterStore) Set(ctx context.Context, key any, value any, options ...lib_store.Option) error { + opts := lib_store.ApplyOptionsWithDefault(s.options, options...) 
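	// Per-call options take precedence over the defaults passed to NewRedisCluster:
	// with a store built via lib_store.WithExpiration(6*time.Second), a bare Set uses
	// the 6s default, while Set(..., lib_store.WithExpiration(5*time.Second)) uses 5s,
	// as exercised by TestRedisClusterSet and TestRedisClusterSetWhenNoOptionsGiven below.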
err := s.clusclient.Set(ctx, key.(string), value, opts.Expiration).Err() if err != nil { @@ -86,7 +87,7 @@ func (s *RedisClusterStore) Set(ctx context.Context, key any, value any, options func (s *RedisClusterStore) setTags(ctx context.Context, key any, tags []string) { for _, tag := range tags { - tagKey := fmt.Sprintf(RedisTagPattern, tag) + tagKey := fmt.Sprintf(RedisClusterTagPattern, tag) s.clusclient.SAdd(ctx, tagKey, key.(string)) s.clusclient.Expire(ctx, tagKey, 720*time.Hour) } @@ -99,12 +100,12 @@ func (s *RedisClusterStore) Delete(ctx context.Context, key any) error { } // Invalidate invalidates some cache data in Redis for given options -func (s *RedisClusterStore) Invalidate(ctx context.Context, options ...InvalidateOption) error { - opts := ApplyInvalidateOptions(options...) +func (s *RedisClusterStore) Invalidate(ctx context.Context, options ...lib_store.InvalidateOption) error { + opts := lib_store.ApplyInvalidateOptions(options...) if tags := opts.Tags; len(tags) > 0 { for _, tag := range tags { - tagKey := fmt.Sprintf(RedisTagPattern, tag) + tagKey := fmt.Sprintf(RedisClusterTagPattern, tag) cacheKeys, err := s.clusclient.SMembers(ctx, tagKey).Result() if err != nil { continue diff --git a/store/rediscluster_bench_test.go b/store/rediscluster/rediscluster_bench_test.go similarity index 87% rename from store/rediscluster_bench_test.go rename to store/rediscluster/rediscluster_bench_test.go index c494035..27b64ba 100644 --- a/store/rediscluster_bench_test.go +++ b/store/rediscluster/rediscluster_bench_test.go @@ -1,4 +1,4 @@ -package store +package rediscluster import ( "context" @@ -7,6 +7,7 @@ import ( "strings" "testing" + lib_store "github.com/eko/gocache/v4/lib/store" "github.com/go-redis/redis/v8" ) @@ -26,7 +27,7 @@ func BenchmarkRedisClusterSet(b *testing.B) { key := fmt.Sprintf("test-%d", n) value := []byte(fmt.Sprintf("value-%d", n)) - store.Set(ctx, key, value, WithTags([]string{fmt.Sprintf("tag-%d", n)})) + store.Set(ctx, key, value, lib_store.WithTags([]string{fmt.Sprintf("tag-%d", n)})) } }) } diff --git a/test/mocks/store/clients/rediscluster_interface.go b/store/rediscluster/rediscluster_mock.go similarity index 99% rename from test/mocks/store/clients/rediscluster_interface.go rename to store/rediscluster/rediscluster_mock.go index 31e0b91..5fb148d 100644 --- a/test/mocks/store/clients/rediscluster_interface.go +++ b/store/rediscluster/rediscluster_mock.go @@ -2,7 +2,7 @@ // Source: store/rediscluster.go // Package mocks is a generated GoMock package. 
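For completeness, a similarly hedged sketch for the cluster variant; NewRedisCluster and GetWithTTL match the signatures in this diff, and the redis.NewClusterClient call is illustrative go-redis v8 usage rather than something defined in this repository:

// Illustrative only; assumes *redis.ClusterClient satisfies RedisClusterClientInterface.
package example

import (
	"context"
	"time"

	"github.com/go-redis/redis/v8"

	lib_store "github.com/eko/gocache/v4/lib/store"
	rediscluster_store "github.com/eko/gocache/v4/store/rediscluster"
)

func wireClusterStore(ctx context.Context) (any, time.Duration, error) {
	// go-redis v8 cluster client; the node addresses are example values.
	client := redis.NewClusterClient(&redis.ClusterOptions{
		Addrs: []string{"127.0.0.1:7000", "127.0.0.1:7001"},
	})

	store := rediscluster_store.NewRedisCluster(client, lib_store.WithExpiration(6*time.Second))

	// Returns the value together with its remaining TTL, as implemented above.
	return store.GetWithTTL(ctx, "my-key")
}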
-package mocks +package rediscluster import ( context "context" diff --git a/store/rediscluster_test.go b/store/rediscluster/rediscluster_test.go similarity index 72% rename from store/rediscluster_test.go rename to store/rediscluster/rediscluster_test.go index 78edf69..16f6ad8 100644 --- a/store/rediscluster_test.go +++ b/store/rediscluster/rediscluster_test.go @@ -1,11 +1,11 @@ -package store +package rediscluster import ( "context" "testing" "time" - mocksStore "github.com/eko/gocache/v3/test/mocks/store/clients" + lib_store "github.com/eko/gocache/v4/lib/store" "github.com/go-redis/redis/v8" "github.com/golang/mock/gomock" "github.com/stretchr/testify/assert" @@ -15,15 +15,15 @@ func TestNewRedisCluster(t *testing.T) { // Given ctrl := gomock.NewController(t) - client := mocksStore.NewMockRedisClusterClientInterface(ctrl) + client := NewMockRedisClusterClientInterface(ctrl) // When - store := NewRedisCluster(client, WithExpiration(6*time.Second)) + store := NewRedisCluster(client, lib_store.WithExpiration(6*time.Second)) // Then assert.IsType(t, new(RedisClusterStore), store) assert.Equal(t, client, store.clusclient) - assert.Equal(t, &Options{Expiration: 6 * time.Second}, store.options) + assert.Equal(t, &lib_store.Options{Expiration: 6 * time.Second}, store.options) } func TestRedisClusterGet(t *testing.T) { @@ -32,7 +32,7 @@ func TestRedisClusterGet(t *testing.T) { ctx := context.Background() - client := mocksStore.NewMockRedisClusterClientInterface(ctrl) + client := NewMockRedisClusterClientInterface(ctrl) client.EXPECT().Get(ctx, "my-key").Return(&redis.StringCmd{}) store := NewRedisCluster(client) @@ -54,13 +54,13 @@ func TestRedisClusterSet(t *testing.T) { cacheKey := "my-key" cacheValue := "my-cache-value" - client := mocksStore.NewMockRedisClusterClientInterface(ctrl) + client := NewMockRedisClusterClientInterface(ctrl) client.EXPECT().Set(ctx, "my-key", cacheValue, 5*time.Second).Return(&redis.StatusCmd{}) - store := NewRedisCluster(client, WithExpiration(6*time.Second)) + store := NewRedisCluster(client, lib_store.WithExpiration(6*time.Second)) // When - err := store.Set(ctx, cacheKey, cacheValue, WithExpiration(5*time.Second)) + err := store.Set(ctx, cacheKey, cacheValue, lib_store.WithExpiration(5*time.Second)) // Then assert.Nil(t, err) @@ -75,10 +75,10 @@ func TestRedisClusterSetWhenNoOptionsGiven(t *testing.T) { cacheKey := "my-key" cacheValue := "my-cache-value" - client := mocksStore.NewMockRedisClusterClientInterface(ctrl) + client := NewMockRedisClusterClientInterface(ctrl) client.EXPECT().Set(ctx, "my-key", cacheValue, 6*time.Second).Return(&redis.StatusCmd{}) - store := NewRedisCluster(client, WithExpiration(6*time.Second)) + store := NewRedisCluster(client, lib_store.WithExpiration(6*time.Second)) // When err := store.Set(ctx, cacheKey, cacheValue) @@ -96,7 +96,7 @@ func TestRedisClusterSetWithTags(t *testing.T) { cacheKey := "my-key" cacheValue := "my-cache-value" - client := mocksStore.NewMockRedisClusterClientInterface(ctrl) + client := NewMockRedisClusterClientInterface(ctrl) client.EXPECT().Set(ctx, cacheKey, cacheValue, time.Duration(0)).Return(&redis.StatusCmd{}) client.EXPECT().SAdd(ctx, "gocache_tag_tag1", "my-key").Return(&redis.IntCmd{}) client.EXPECT().Expire(ctx, "gocache_tag_tag1", 720*time.Hour).Return(&redis.BoolCmd{}) @@ -104,7 +104,7 @@ func TestRedisClusterSetWithTags(t *testing.T) { store := NewRedisCluster(client) // When - err := store.Set(ctx, cacheKey, cacheValue, WithTags([]string{"tag1"})) + err := store.Set(ctx, cacheKey, cacheValue, 
lib_store.WithTags([]string{"tag1"})) // Then assert.Nil(t, err) @@ -118,7 +118,7 @@ func TestRedisClusterDelete(t *testing.T) { cacheKey := "my-key" - client := mocksStore.NewMockRedisClusterClientInterface(ctrl) + client := NewMockRedisClusterClientInterface(ctrl) client.EXPECT().Del(ctx, "my-key").Return(&redis.IntCmd{}) store := NewRedisCluster(client) @@ -138,14 +138,14 @@ func TestRedisClusterInvalidate(t *testing.T) { cacheKeys := &redis.StringSliceCmd{} - client := mocksStore.NewMockRedisClusterClientInterface(ctrl) + client := NewMockRedisClusterClientInterface(ctrl) client.EXPECT().SMembers(ctx, "gocache_tag_tag1").Return(cacheKeys) client.EXPECT().Del(ctx, "gocache_tag_tag1").Return(&redis.IntCmd{}) store := NewRedisCluster(client) // When - err := store.Invalidate(ctx, WithInvalidateTags([]string{"tag1"})) + err := store.Invalidate(ctx, lib_store.WithInvalidateTags([]string{"tag1"})) // Then assert.Nil(t, err) @@ -157,7 +157,7 @@ func TestRedisClusterClear(t *testing.T) { ctx := context.Background() - client := mocksStore.NewMockRedisClusterClientInterface(ctrl) + client := NewMockRedisClusterClientInterface(ctrl) client.EXPECT().FlushAll(ctx).Return(&redis.StatusCmd{}) store := NewRedisCluster(client) @@ -173,7 +173,7 @@ func TestRedisClusterGetType(t *testing.T) { // Given ctrl := gomock.NewController(t) - client := mocksStore.NewMockRedisClusterClientInterface(ctrl) + client := NewMockRedisClusterClientInterface(ctrl) store := NewRedisCluster(client) diff --git a/store/ristretto/go.mod b/store/ristretto/go.mod new file mode 100644 index 0000000..51e6289 --- /dev/null +++ b/store/ristretto/go.mod @@ -0,0 +1,24 @@ +module github.com/eko/gocache/v4/store/ristretto + +go 1.19 + +require ( + github.com/dgraph-io/ristretto v0.1.1 + github.com/eko/gocache/v4/lib v0.0.0 + github.com/golang/mock v1.6.0 + github.com/stretchr/testify v1.8.1 +) + +require ( + github.com/cespare/xxhash/v2 v2.1.2 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/dustin/go-humanize v1.0.0 // indirect + github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + golang.org/x/exp v0.0.0-20221126150942-6ab00d035af9 // indirect + golang.org/x/sys v0.1.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect +) + +replace github.com/eko/gocache/v4/lib => ../../lib/ diff --git a/store/ristretto/go.sum b/store/ristretto/go.sum new file mode 100644 index 0000000..e83b6a2 --- /dev/null +++ b/store/ristretto/go.sum @@ -0,0 +1,64 @@ +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= +github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgraph-io/ristretto v0.1.1 h1:6CWw5tJNgpegArSHpNHJKldNeq03FQCwYvfMVWajOK8= +github.com/dgraph-io/ristretto v0.1.1/go.mod h1:S1GPSBCYCIhmVNfcth17y2zZtQT6wzkzgwUve0VDWWA= +github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA= +github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/dgryski/go-rendezvous 
v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= +github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/exp v0.0.0-20221126150942-6ab00d035af9 h1:yZNXmy+j/JpX19vZkVktWqAo7Gny4PBWYYK3zskGpx4= +golang.org/x/exp v0.0.0-20221126150942-6ab00d035af9/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20221010170243-090e33056c14/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0 h1:kunALQeHf1/185U1i0GOB/fy1IPRDDpuoOOqRReG57U= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/store/ristretto.go b/store/ristretto/ristretto.go similarity index 83% rename from store/ristretto.go rename to store/ristretto/ristretto.go index 6c334fa..c93b1d2 100644 --- a/store/ristretto.go +++ b/store/ristretto/ristretto.go @@ -1,4 +1,4 @@ -package store +package ristretto import ( "context" @@ -6,6 +6,8 @@ import ( "fmt" "strings" "time" + + lib_store "github.com/eko/gocache/v4/lib/store" ) const ( @@ -26,14 +28,14 @@ type RistrettoClientInterface interface { // RistrettoStore is a store for Ristretto (memory) library type RistrettoStore struct { client RistrettoClientInterface - options *Options + options *lib_store.Options } // NewRistretto creates a new store to Ristretto (memory) library instance -func NewRistretto(client RistrettoClientInterface, options ...Option) *RistrettoStore { +func NewRistretto(client RistrettoClientInterface, options ...lib_store.Option) *RistrettoStore { return &RistrettoStore{ client: client, - options: ApplyOptions(options...), + options: lib_store.ApplyOptions(options...), } } @@ -43,7 +45,7 @@ func (s *RistrettoStore) Get(_ context.Context, key any) (any, error) { value, exists := s.client.Get(key) if !exists { - err = NotFoundWithCause(errors.New("value not found in Ristretto store")) + err = lib_store.NotFoundWithCause(errors.New("value not found in Ristretto store")) } return value, err @@ -56,8 +58,8 @@ func (s *RistrettoStore) GetWithTTL(ctx context.Context, key any) (any, time.Dur } // Set defines data in Ristretto memoey cache for given key identifier -func (s *RistrettoStore) Set(ctx context.Context, key any, value any, options ...Option) error { - opts := ApplyOptionsWithDefault(s.options, options...) +func (s *RistrettoStore) Set(ctx context.Context, key any, value any, options ...lib_store.Option) error { + opts := lib_store.ApplyOptionsWithDefault(s.options, options...) 
 	var err error
@@ -99,7 +101,7 @@ func (s *RistrettoStore) setTags(ctx context.Context, key any, tags []string) {
 			cacheKeys = append(cacheKeys, key.(string))
 		}
 
-		s.Set(ctx, tagKey, []byte(strings.Join(cacheKeys, ",")), WithExpiration(720*time.Hour))
+		s.Set(ctx, tagKey, []byte(strings.Join(cacheKeys, ",")), lib_store.WithExpiration(720*time.Hour))
 	}
 }
 
@@ -110,8 +112,8 @@ func (s *RistrettoStore) Delete(_ context.Context, key any) error {
 }
 
 // Invalidate invalidates some cache data in Ristretto for given options
-func (s *RistrettoStore) Invalidate(ctx context.Context, options ...InvalidateOption) error {
-	opts := ApplyInvalidateOptions(options...)
+func (s *RistrettoStore) Invalidate(ctx context.Context, options ...lib_store.InvalidateOption) error {
+	opts := lib_store.ApplyInvalidateOptions(options...)
 
 	if tags := opts.Tags; len(tags) > 0 {
 		for _, tag := range tags {
diff --git a/store/ristretto_bench_test.go b/store/ristretto/ristretto_bench_test.go
similarity index 87%
rename from store/ristretto_bench_test.go
rename to store/ristretto/ristretto_bench_test.go
index cc61317..0977054 100644
--- a/store/ristretto_bench_test.go
+++ b/store/ristretto/ristretto_bench_test.go
@@ -1,4 +1,4 @@
-package store
+package ristretto
 
 import (
 	"context"
@@ -7,6 +7,7 @@ import (
 	"testing"
 
 	"github.com/dgraph-io/ristretto"
+	lib_store "github.com/eko/gocache/v4/lib/store"
 )
 
 func BenchmarkRistrettoSet(b *testing.B) {
@@ -29,7 +30,7 @@ func BenchmarkRistrettoSet(b *testing.B) {
 				key := fmt.Sprintf("test-%d", n)
 				value := []byte(fmt.Sprintf("value-%d", n))
 
-				store.Set(ctx, key, value, WithTags([]string{fmt.Sprintf("tag-%d", n)}))
+				store.Set(ctx, key, value, lib_store.WithTags([]string{fmt.Sprintf("tag-%d", n)}))
 			}
 		})
 	}
diff --git a/test/mocks/store/clients/ristretto_interface.go b/store/ristretto/ristretto_mock.go
similarity index 99%
rename from test/mocks/store/clients/ristretto_interface.go
rename to store/ristretto/ristretto_mock.go
index bcc69dc..4b11159 100644
--- a/test/mocks/store/clients/ristretto_interface.go
+++ b/store/ristretto/ristretto_mock.go
@@ -2,7 +2,7 @@
 // Source: store/ristretto.go
 
 // Package mocks is a generated GoMock package.
-package mocks +package ristretto import ( reflect "reflect" diff --git a/store/ristretto_test.go b/store/ristretto/ristretto_test.go similarity index 76% rename from store/ristretto_test.go rename to store/ristretto/ristretto_test.go index 48d0480..e05bdd5 100644 --- a/store/ristretto_test.go +++ b/store/ristretto/ristretto_test.go @@ -1,4 +1,4 @@ -package store +package ristretto import ( "context" @@ -6,7 +6,7 @@ import ( "testing" "time" - mocksStore "github.com/eko/gocache/v3/test/mocks/store/clients" + lib_store "github.com/eko/gocache/v4/lib/store" "github.com/golang/mock/gomock" "github.com/stretchr/testify/assert" ) @@ -15,15 +15,15 @@ func TestNewRistretto(t *testing.T) { // Given ctrl := gomock.NewController(t) - client := mocksStore.NewMockRistrettoClientInterface(ctrl) + client := NewMockRistrettoClientInterface(ctrl) // When - store := NewRistretto(client, WithCost(8)) + store := NewRistretto(client, lib_store.WithCost(8)) // Then assert.IsType(t, new(RistrettoStore), store) assert.Equal(t, client, store.client) - assert.Equal(t, &Options{Cost: 8}, store.options) + assert.Equal(t, &lib_store.Options{Cost: 8}, store.options) } func TestRistrettoGet(t *testing.T) { @@ -35,7 +35,7 @@ func TestRistrettoGet(t *testing.T) { cacheKey := "my-key" cacheValue := "my-cache-value" - client := mocksStore.NewMockRistrettoClientInterface(ctrl) + client := NewMockRistrettoClientInterface(ctrl) client.EXPECT().Get(cacheKey).Return(cacheValue, true) store := NewRistretto(client) @@ -56,7 +56,7 @@ func TestRistrettoGetWhenError(t *testing.T) { cacheKey := "my-key" - client := mocksStore.NewMockRistrettoClientInterface(ctrl) + client := NewMockRistrettoClientInterface(ctrl) client.EXPECT().Get(cacheKey).Return(nil, false) store := NewRistretto(client) @@ -66,7 +66,7 @@ func TestRistrettoGetWhenError(t *testing.T) { // Then assert.Nil(t, value) - assert.IsType(t, &NotFound{}, err) + assert.IsType(t, &lib_store.NotFound{}, err) } func TestRistrettoGetWithTTL(t *testing.T) { @@ -78,7 +78,7 @@ func TestRistrettoGetWithTTL(t *testing.T) { cacheKey := "my-key" cacheValue := "my-cache-value" - client := mocksStore.NewMockRistrettoClientInterface(ctrl) + client := NewMockRistrettoClientInterface(ctrl) client.EXPECT().Get(cacheKey).Return(cacheValue, true) store := NewRistretto(client) @@ -100,7 +100,7 @@ func TestRistrettoGetWithTTLWhenError(t *testing.T) { cacheKey := "my-key" - client := mocksStore.NewMockRistrettoClientInterface(ctrl) + client := NewMockRistrettoClientInterface(ctrl) client.EXPECT().Get(cacheKey).Return(nil, false) store := NewRistretto(client) @@ -110,7 +110,7 @@ func TestRistrettoGetWithTTLWhenError(t *testing.T) { // Then assert.Nil(t, value) - assert.IsType(t, &NotFound{}, err) + assert.IsType(t, &lib_store.NotFound{}, err) assert.Equal(t, 0*time.Second, ttl) } @@ -123,13 +123,13 @@ func TestRistrettoSet(t *testing.T) { cacheKey := "my-key" cacheValue := "my-cache-value" - client := mocksStore.NewMockRistrettoClientInterface(ctrl) + client := NewMockRistrettoClientInterface(ctrl) client.EXPECT().SetWithTTL(cacheKey, cacheValue, int64(4), 0*time.Second).Return(true) - store := NewRistretto(client, WithCost(7)) + store := NewRistretto(client, lib_store.WithCost(7)) // When - err := store.Set(ctx, cacheKey, cacheValue, WithCost(4)) + err := store.Set(ctx, cacheKey, cacheValue, lib_store.WithCost(4)) // Then assert.Nil(t, err) @@ -144,10 +144,10 @@ func TestRistrettoSetWhenNoOptionsGiven(t *testing.T) { cacheKey := "my-key" cacheValue := "my-cache-value" - client := 
mocksStore.NewMockRistrettoClientInterface(ctrl) + client := NewMockRistrettoClientInterface(ctrl) client.EXPECT().SetWithTTL(cacheKey, cacheValue, int64(7), 0*time.Second).Return(true) - store := NewRistretto(client, WithCost(7)) + store := NewRistretto(client, lib_store.WithCost(7)) // When err := store.Set(ctx, cacheKey, cacheValue) @@ -165,10 +165,10 @@ func TestRistrettoSetWhenError(t *testing.T) { cacheKey := "my-key" cacheValue := "my-cache-value" - client := mocksStore.NewMockRistrettoClientInterface(ctrl) + client := NewMockRistrettoClientInterface(ctrl) client.EXPECT().SetWithTTL(cacheKey, cacheValue, int64(7), 0*time.Second).Return(false) - store := NewRistretto(client, WithCost(7)) + store := NewRistretto(client, lib_store.WithCost(7)) // When err := store.Set(ctx, cacheKey, cacheValue) @@ -186,7 +186,7 @@ func TestRistrettoSetWithTags(t *testing.T) { cacheKey := "my-key" cacheValue := []byte("my-cache-value") - client := mocksStore.NewMockRistrettoClientInterface(ctrl) + client := NewMockRistrettoClientInterface(ctrl) client.EXPECT().SetWithTTL(cacheKey, cacheValue, int64(0), 0*time.Second).Return(true) client.EXPECT().Get("gocache_tag_tag1").Return(nil, true) client.EXPECT().SetWithTTL("gocache_tag_tag1", []byte("my-key"), int64(0), 720*time.Hour).Return(true) @@ -194,7 +194,7 @@ func TestRistrettoSetWithTags(t *testing.T) { store := NewRistretto(client) // When - err := store.Set(ctx, cacheKey, cacheValue, WithTags([]string{"tag1"})) + err := store.Set(ctx, cacheKey, cacheValue, lib_store.WithTags([]string{"tag1"})) // Then assert.Nil(t, err) @@ -209,7 +209,7 @@ func TestRistrettoSetWithTagsWhenAlreadyInserted(t *testing.T) { cacheKey := "my-key" cacheValue := []byte("my-cache-value") - client := mocksStore.NewMockRistrettoClientInterface(ctrl) + client := NewMockRistrettoClientInterface(ctrl) client.EXPECT().SetWithTTL(cacheKey, cacheValue, int64(0), 0*time.Second).Return(true) client.EXPECT().Get("gocache_tag_tag1").Return([]byte("my-key,a-second-key"), true) client.EXPECT().SetWithTTL("gocache_tag_tag1", []byte("my-key,a-second-key"), int64(0), 720*time.Hour).Return(true) @@ -217,7 +217,7 @@ func TestRistrettoSetWithTagsWhenAlreadyInserted(t *testing.T) { store := NewRistretto(client) // When - err := store.Set(ctx, cacheKey, cacheValue, WithTags([]string{"tag1"})) + err := store.Set(ctx, cacheKey, cacheValue, lib_store.WithTags([]string{"tag1"})) // Then assert.Nil(t, err) @@ -231,7 +231,7 @@ func TestRistrettoDelete(t *testing.T) { cacheKey := "my-key" - client := mocksStore.NewMockRistrettoClientInterface(ctrl) + client := NewMockRistrettoClientInterface(ctrl) client.EXPECT().Del(cacheKey) store := NewRistretto(client) @@ -251,7 +251,7 @@ func TestRistrettoInvalidate(t *testing.T) { cacheKeys := []byte("a23fdf987h2svc23,jHG2372x38hf74") - client := mocksStore.NewMockRistrettoClientInterface(ctrl) + client := NewMockRistrettoClientInterface(ctrl) client.EXPECT().Get("gocache_tag_tag1").Return(cacheKeys, true) client.EXPECT().Del("a23fdf987h2svc23") client.EXPECT().Del("jHG2372x38hf74") @@ -259,7 +259,7 @@ func TestRistrettoInvalidate(t *testing.T) { store := NewRistretto(client) // When - err := store.Invalidate(ctx, WithInvalidateTags([]string{"tag1"})) + err := store.Invalidate(ctx, lib_store.WithInvalidateTags([]string{"tag1"})) // Then assert.Nil(t, err) @@ -273,13 +273,13 @@ func TestRistrettoInvalidateWhenError(t *testing.T) { cacheKeys := []byte("a23fdf987h2svc23,jHG2372x38hf74") - client := mocksStore.NewMockRistrettoClientInterface(ctrl) + client := 
NewMockRistrettoClientInterface(ctrl) client.EXPECT().Get("gocache_tag_tag1").Return(cacheKeys, false) store := NewRistretto(client) // When - err := store.Invalidate(ctx, WithInvalidateTags([]string{"tag1"})) + err := store.Invalidate(ctx, lib_store.WithInvalidateTags([]string{"tag1"})) // Then assert.Nil(t, err) @@ -291,7 +291,7 @@ func TestRistrettoClear(t *testing.T) { ctx := context.Background() - client := mocksStore.NewMockRistrettoClientInterface(ctrl) + client := NewMockRistrettoClientInterface(ctrl) client.EXPECT().Clear() store := NewRistretto(client) @@ -307,7 +307,7 @@ func TestRistrettoGetType(t *testing.T) { // Given ctrl := gomock.NewController(t) - client := mocksStore.NewMockRistrettoClientInterface(ctrl) + client := NewMockRistrettoClientInterface(ctrl) store := NewRistretto(client) diff --git a/vendor/github.com/XiaoMi/pegasus-go-client/LICENSE b/vendor/github.com/XiaoMi/pegasus-go-client/LICENSE deleted file mode 100644 index a840c35..0000000 --- a/vendor/github.com/XiaoMi/pegasus-go-client/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - -Copyright [yyyy] [name of copyright owner] - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. \ No newline at end of file diff --git a/vendor/github.com/XiaoMi/pegasus-go-client/admin/client.go b/vendor/github.com/XiaoMi/pegasus-go-client/admin/client.go deleted file mode 100644 index 0f31027..0000000 --- a/vendor/github.com/XiaoMi/pegasus-go-client/admin/client.go +++ /dev/null @@ -1,115 +0,0 @@ -package admin - -import ( - "context" - "fmt" - "time" - - "github.com/XiaoMi/pegasus-go-client/idl/admin" - "github.com/XiaoMi/pegasus-go-client/idl/base" - "github.com/XiaoMi/pegasus-go-client/session" -) - -// Client provides the administration API to a specific cluster. -// Remember only the superusers configured to the cluster have the admin priviledges. -type Client interface { - CreateTable(ctx context.Context, tableName string, partitionCount int) error - - DropTable(ctx context.Context, tableName string) error - - ListTables(ctx context.Context) ([]*TableInfo, error) -} - -// TableInfo is the table information. 
-type TableInfo struct { - Name string - - // Envs is a set of attributes binding to this table. - Envs map[string]string -} - -type Config struct { - MetaServers []string `json:"meta_servers"` -} - -// NewClient returns an instance of Client. -func NewClient(cfg Config) Client { - return &rpcBasedClient{ - metaManager: session.NewMetaManager(cfg.MetaServers, session.NewNodeSession), - } -} - -type rpcBasedClient struct { - metaManager *session.MetaManager -} - -func (c *rpcBasedClient) waitTableReady(ctx context.Context, tableName string, partitionCount int) error { - const replicaCount int = 3 - - for { - resp, err := c.metaManager.QueryConfig(ctx, tableName) - if err != nil { - return err - } - if resp.GetErr().Errno != base.ERR_OK.String() { - return fmt.Errorf("QueryConfig failed: %s", resp.GetErr().String()) - } - - readyCount := 0 - for _, part := range resp.Partitions { - if part.Primary.GetRawAddress() != 0 && len(part.Secondaries)+1 == replicaCount { - readyCount++ - } - } - if readyCount == partitionCount { - break - } - time.Sleep(time.Second) - } - return nil -} - -func (c *rpcBasedClient) CreateTable(ctx context.Context, tableName string, partitionCount int) error { - _, err := c.metaManager.CreateApp(ctx, &admin.CreateAppRequest{ - AppName: tableName, - Options: &admin.CreateAppOptions{ - PartitionCount: int32(partitionCount), - ReplicaCount: 3, - AppType: "pegasus", - Envs: make(map[string]string), - IsStateful: true, - }, - }) - if err != nil { - return err - } - err = c.waitTableReady(ctx, tableName, partitionCount) - return err -} - -func (c *rpcBasedClient) DropTable(ctx context.Context, tableName string) error { - req := admin.NewDropAppRequest() - req.AppName = tableName - reserveSeconds := int64(1) // delete immediately. the caller is responsible for the soft deletion of table. - req.Options = &admin.DropAppOptions{ - SuccessIfNotExist: true, - ReserveSeconds: &reserveSeconds, - } - _, err := c.metaManager.DropApp(ctx, req) - return err -} - -func (c *rpcBasedClient) ListTables(ctx context.Context) ([]*TableInfo, error) { - resp, err := c.metaManager.ListApps(ctx, &admin.ListAppsRequest{ - Status: admin.AppStatus_AS_AVAILABLE, - }) - if err != nil { - return nil, err - } - - var results []*TableInfo - for _, app := range resp.Infos { - results = append(results, &TableInfo{Name: app.AppName, Envs: app.Envs}) - } - return results, nil -} diff --git a/vendor/github.com/XiaoMi/pegasus-go-client/admin/remote_cmd_client.go b/vendor/github.com/XiaoMi/pegasus-go-client/admin/remote_cmd_client.go deleted file mode 100644 index 2238925..0000000 --- a/vendor/github.com/XiaoMi/pegasus-go-client/admin/remote_cmd_client.go +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package admin - -import ( - "context" - - "github.com/XiaoMi/pegasus-go-client/idl/base" - "github.com/XiaoMi/pegasus-go-client/idl/cmd" - "github.com/XiaoMi/pegasus-go-client/session" -) - -// RemoteCmdClient is a client to call remote command to a PegasusServer. -type RemoteCmdClient struct { - session session.NodeSession -} - -// NewRemoteCmdClient returns an instance of RemoteCmdClient. -func NewRemoteCmdClient(addr string, nodeType session.NodeType) *RemoteCmdClient { - return &RemoteCmdClient{ - session: session.NewNodeSession(addr, nodeType), - } -} - -// Call a remote command. -func (c *RemoteCmdClient) Call(ctx context.Context, command string, arguments []string) (cmdResult string, err error) { - rcmd := &RemoteCommand{ - Command: command, - Arguments: arguments, - } - return rcmd.Call(ctx, c.session) -} - -// RemoteCommand can be called concurrently by multiple sessions. -type RemoteCommand struct { - Command string - Arguments []string -} - -// Call a remote command to an existing session. -func (c *RemoteCommand) Call(ctx context.Context, session session.NodeSession) (cmdResult string, err error) { - thriftArgs := &cmd.RemoteCmdServiceCallCommandArgs{ - Cmd: &cmd.Command{Cmd: c.Command, Arguments: c.Arguments}, - } - res, err := session.CallWithGpid(ctx, &base.Gpid{}, thriftArgs, "RPC_CLI_CLI_CALL") - if err != nil { - return "", err - } - ret, _ := res.(*cmd.RemoteCmdServiceCallCommandResult) - return ret.GetSuccess(), nil -} diff --git a/vendor/github.com/XiaoMi/pegasus-go-client/idl/admin/GoUnusedProtection__.go b/vendor/github.com/XiaoMi/pegasus-go-client/idl/admin/GoUnusedProtection__.go deleted file mode 100644 index 86e6c7e..0000000 --- a/vendor/github.com/XiaoMi/pegasus-go-client/idl/admin/GoUnusedProtection__.go +++ /dev/null @@ -1,6 +0,0 @@ -// Autogenerated by Thrift Compiler (0.13.0) -// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - -package admin - -var GoUnusedProtection__ int diff --git a/vendor/github.com/XiaoMi/pegasus-go-client/idl/admin/admin-consts.go b/vendor/github.com/XiaoMi/pegasus-go-client/idl/admin/admin-consts.go deleted file mode 100644 index c681065..0000000 --- a/vendor/github.com/XiaoMi/pegasus-go-client/idl/admin/admin-consts.go +++ /dev/null @@ -1,25 +0,0 @@ -// Autogenerated by Thrift Compiler (0.13.0) -// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - -package admin - -import ( - "bytes" - "context" - "fmt" - "github.com/XiaoMi/pegasus-go-client/idl/base" - "github.com/pegasus-kv/thrift/lib/go/thrift" - "reflect" -) - -// (needed to ensure safety because of naive import list construction.) -var _ = thrift.ZERO -var _ = fmt.Printf -var _ = context.Background -var _ = reflect.DeepEqual -var _ = bytes.Equal - -var _ = base.GoUnusedProtection__ - -func init() { -} diff --git a/vendor/github.com/XiaoMi/pegasus-go-client/idl/admin/admin.go b/vendor/github.com/XiaoMi/pegasus-go-client/idl/admin/admin.go deleted file mode 100644 index 86e2c5d..0000000 --- a/vendor/github.com/XiaoMi/pegasus-go-client/idl/admin/admin.go +++ /dev/null @@ -1,11774 +0,0 @@ -// Autogenerated by Thrift Compiler (0.13.0) -// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - -package admin - -import ( - "bytes" - "context" - "database/sql/driver" - "errors" - "fmt" - "reflect" - - "github.com/XiaoMi/pegasus-go-client/idl/base" - "github.com/pegasus-kv/thrift/lib/go/thrift" -) - -// (needed to ensure safety because of naive import list construction.) 
-var _ = thrift.ZERO -var _ = fmt.Printf -var _ = context.Background -var _ = reflect.DeepEqual -var _ = bytes.Equal - -var _ = base.GoUnusedProtection__ - -type AppStatus int64 - -const ( - AppStatus_AS_INVALID AppStatus = 0 - AppStatus_AS_AVAILABLE AppStatus = 1 - AppStatus_AS_CREATING AppStatus = 2 - AppStatus_AS_CREATE_FAILED AppStatus = 3 - AppStatus_AS_DROPPING AppStatus = 4 - AppStatus_AS_DROP_FAILED AppStatus = 5 - AppStatus_AS_DROPPED AppStatus = 6 - AppStatus_AS_RECALLING AppStatus = 7 -) - -func (p AppStatus) String() string { - switch p { - case AppStatus_AS_INVALID: - return "AS_INVALID" - case AppStatus_AS_AVAILABLE: - return "AS_AVAILABLE" - case AppStatus_AS_CREATING: - return "AS_CREATING" - case AppStatus_AS_CREATE_FAILED: - return "AS_CREATE_FAILED" - case AppStatus_AS_DROPPING: - return "AS_DROPPING" - case AppStatus_AS_DROP_FAILED: - return "AS_DROP_FAILED" - case AppStatus_AS_DROPPED: - return "AS_DROPPED" - case AppStatus_AS_RECALLING: - return "AS_RECALLING" - } - return "" -} - -func AppStatusFromString(s string) (AppStatus, error) { - switch s { - case "AS_INVALID": - return AppStatus_AS_INVALID, nil - case "AS_AVAILABLE": - return AppStatus_AS_AVAILABLE, nil - case "AS_CREATING": - return AppStatus_AS_CREATING, nil - case "AS_CREATE_FAILED": - return AppStatus_AS_CREATE_FAILED, nil - case "AS_DROPPING": - return AppStatus_AS_DROPPING, nil - case "AS_DROP_FAILED": - return AppStatus_AS_DROP_FAILED, nil - case "AS_DROPPED": - return AppStatus_AS_DROPPED, nil - case "AS_RECALLING": - return AppStatus_AS_RECALLING, nil - } - return AppStatus(0), fmt.Errorf("not a valid AppStatus string") -} - -func AppStatusPtr(v AppStatus) *AppStatus { return &v } - -func (p AppStatus) MarshalText() ([]byte, error) { - return []byte(p.String()), nil -} - -func (p *AppStatus) UnmarshalText(text []byte) error { - q, err := AppStatusFromString(string(text)) - if err != nil { - return err - } - *p = q - return nil -} - -func (p *AppStatus) Scan(value interface{}) error { - v, ok := value.(int64) - if !ok { - return errors.New("Scan value is not int64") - } - *p = AppStatus(v) - return nil -} - -func (p *AppStatus) Value() (driver.Value, error) { - if p == nil { - return nil, nil - } - return int64(*p), nil -} - -type AppEnvOperation int64 - -const ( - AppEnvOperation_APP_ENV_OP_INVALID AppEnvOperation = 0 - AppEnvOperation_APP_ENV_OP_SET AppEnvOperation = 1 - AppEnvOperation_APP_ENV_OP_DEL AppEnvOperation = 2 - AppEnvOperation_APP_ENV_OP_CLEAR AppEnvOperation = 3 -) - -func (p AppEnvOperation) String() string { - switch p { - case AppEnvOperation_APP_ENV_OP_INVALID: - return "APP_ENV_OP_INVALID" - case AppEnvOperation_APP_ENV_OP_SET: - return "APP_ENV_OP_SET" - case AppEnvOperation_APP_ENV_OP_DEL: - return "APP_ENV_OP_DEL" - case AppEnvOperation_APP_ENV_OP_CLEAR: - return "APP_ENV_OP_CLEAR" - } - return "" -} - -func AppEnvOperationFromString(s string) (AppEnvOperation, error) { - switch s { - case "APP_ENV_OP_INVALID": - return AppEnvOperation_APP_ENV_OP_INVALID, nil - case "APP_ENV_OP_SET": - return AppEnvOperation_APP_ENV_OP_SET, nil - case "APP_ENV_OP_DEL": - return AppEnvOperation_APP_ENV_OP_DEL, nil - case "APP_ENV_OP_CLEAR": - return AppEnvOperation_APP_ENV_OP_CLEAR, nil - } - return AppEnvOperation(0), fmt.Errorf("not a valid AppEnvOperation string") -} - -func AppEnvOperationPtr(v AppEnvOperation) *AppEnvOperation { return &v } - -func (p AppEnvOperation) MarshalText() ([]byte, error) { - return []byte(p.String()), nil -} - -func (p *AppEnvOperation) UnmarshalText(text []byte) 
error { - q, err := AppEnvOperationFromString(string(text)) - if err != nil { - return err - } - *p = q - return nil -} - -func (p *AppEnvOperation) Scan(value interface{}) error { - v, ok := value.(int64) - if !ok { - return errors.New("Scan value is not int64") - } - *p = AppEnvOperation(v) - return nil -} - -func (p *AppEnvOperation) Value() (driver.Value, error) { - if p == nil { - return nil, nil - } - return int64(*p), nil -} - -type NodeStatus int64 - -const ( - NodeStatus_NS_INVALID NodeStatus = 0 - NodeStatus_NS_ALIVE NodeStatus = 1 - NodeStatus_NS_UNALIVE NodeStatus = 2 -) - -func (p NodeStatus) String() string { - switch p { - case NodeStatus_NS_INVALID: - return "NS_INVALID" - case NodeStatus_NS_ALIVE: - return "NS_ALIVE" - case NodeStatus_NS_UNALIVE: - return "NS_UNALIVE" - } - return "" -} - -func NodeStatusFromString(s string) (NodeStatus, error) { - switch s { - case "NS_INVALID": - return NodeStatus_NS_INVALID, nil - case "NS_ALIVE": - return NodeStatus_NS_ALIVE, nil - case "NS_UNALIVE": - return NodeStatus_NS_UNALIVE, nil - } - return NodeStatus(0), fmt.Errorf("not a valid NodeStatus string") -} - -func NodeStatusPtr(v NodeStatus) *NodeStatus { return &v } - -func (p NodeStatus) MarshalText() ([]byte, error) { - return []byte(p.String()), nil -} - -func (p *NodeStatus) UnmarshalText(text []byte) error { - q, err := NodeStatusFromString(string(text)) - if err != nil { - return err - } - *p = q - return nil -} - -func (p *NodeStatus) Scan(value interface{}) error { - v, ok := value.(int64) - if !ok { - return errors.New("Scan value is not int64") - } - *p = NodeStatus(v) - return nil -} - -func (p *NodeStatus) Value() (driver.Value, error) { - if p == nil { - return nil, nil - } - return int64(*p), nil -} - -type MetaFunctionLevel int64 - -const ( - MetaFunctionLevel_fl_stopped MetaFunctionLevel = 100 - MetaFunctionLevel_fl_blind MetaFunctionLevel = 200 - MetaFunctionLevel_fl_freezed MetaFunctionLevel = 300 - MetaFunctionLevel_fl_steady MetaFunctionLevel = 400 - MetaFunctionLevel_fl_lively MetaFunctionLevel = 500 - MetaFunctionLevel_fl_invalid MetaFunctionLevel = 10000 -) - -func (p MetaFunctionLevel) String() string { - switch p { - case MetaFunctionLevel_fl_stopped: - return "fl_stopped" - case MetaFunctionLevel_fl_blind: - return "fl_blind" - case MetaFunctionLevel_fl_freezed: - return "fl_freezed" - case MetaFunctionLevel_fl_steady: - return "fl_steady" - case MetaFunctionLevel_fl_lively: - return "fl_lively" - case MetaFunctionLevel_fl_invalid: - return "fl_invalid" - } - return "" -} - -func MetaFunctionLevelFromString(s string) (MetaFunctionLevel, error) { - switch s { - case "fl_stopped": - return MetaFunctionLevel_fl_stopped, nil - case "fl_blind": - return MetaFunctionLevel_fl_blind, nil - case "fl_freezed": - return MetaFunctionLevel_fl_freezed, nil - case "fl_steady": - return MetaFunctionLevel_fl_steady, nil - case "fl_lively": - return MetaFunctionLevel_fl_lively, nil - case "fl_invalid": - return MetaFunctionLevel_fl_invalid, nil - } - return MetaFunctionLevel(0), fmt.Errorf("not a valid MetaFunctionLevel string") -} - -func MetaFunctionLevelPtr(v MetaFunctionLevel) *MetaFunctionLevel { return &v } - -func (p MetaFunctionLevel) MarshalText() ([]byte, error) { - return []byte(p.String()), nil -} - -func (p *MetaFunctionLevel) UnmarshalText(text []byte) error { - q, err := MetaFunctionLevelFromString(string(text)) - if err != nil { - return err - } - *p = q - return nil -} - -func (p *MetaFunctionLevel) Scan(value interface{}) error { - v, ok := value.(int64) - 
if !ok { - return errors.New("Scan value is not int64") - } - *p = MetaFunctionLevel(v) - return nil -} - -func (p *MetaFunctionLevel) Value() (driver.Value, error) { - if p == nil { - return nil, nil - } - return int64(*p), nil -} - -type DuplicationStatus int64 - -const ( - DuplicationStatus_DS_INIT DuplicationStatus = 0 - DuplicationStatus_DS_START DuplicationStatus = 1 - DuplicationStatus_DS_PAUSE DuplicationStatus = 2 - DuplicationStatus_DS_REMOVED DuplicationStatus = 3 -) - -func (p DuplicationStatus) String() string { - switch p { - case DuplicationStatus_DS_INIT: - return "DS_INIT" - case DuplicationStatus_DS_START: - return "DS_START" - case DuplicationStatus_DS_PAUSE: - return "DS_PAUSE" - case DuplicationStatus_DS_REMOVED: - return "DS_REMOVED" - } - return "" -} - -func DuplicationStatusFromString(s string) (DuplicationStatus, error) { - switch s { - case "DS_INIT": - return DuplicationStatus_DS_INIT, nil - case "DS_START": - return DuplicationStatus_DS_START, nil - case "DS_PAUSE": - return DuplicationStatus_DS_PAUSE, nil - case "DS_REMOVED": - return DuplicationStatus_DS_REMOVED, nil - } - return DuplicationStatus(0), fmt.Errorf("not a valid DuplicationStatus string") -} - -func DuplicationStatusPtr(v DuplicationStatus) *DuplicationStatus { return &v } - -func (p DuplicationStatus) MarshalText() ([]byte, error) { - return []byte(p.String()), nil -} - -func (p *DuplicationStatus) UnmarshalText(text []byte) error { - q, err := DuplicationStatusFromString(string(text)) - if err != nil { - return err - } - *p = q - return nil -} - -func (p *DuplicationStatus) Scan(value interface{}) error { - v, ok := value.(int64) - if !ok { - return errors.New("Scan value is not int64") - } - *p = DuplicationStatus(v) - return nil -} - -func (p *DuplicationStatus) Value() (driver.Value, error) { - if p == nil { - return nil, nil - } - return int64(*p), nil -} - -type DuplicationFailMode int64 - -const ( - DuplicationFailMode_FAIL_SLOW DuplicationFailMode = 0 - DuplicationFailMode_FAIL_SKIP DuplicationFailMode = 1 - DuplicationFailMode_FAIL_FAST DuplicationFailMode = 2 -) - -func (p DuplicationFailMode) String() string { - switch p { - case DuplicationFailMode_FAIL_SLOW: - return "FAIL_SLOW" - case DuplicationFailMode_FAIL_SKIP: - return "FAIL_SKIP" - case DuplicationFailMode_FAIL_FAST: - return "FAIL_FAST" - } - return "" -} - -func DuplicationFailModeFromString(s string) (DuplicationFailMode, error) { - switch s { - case "FAIL_SLOW": - return DuplicationFailMode_FAIL_SLOW, nil - case "FAIL_SKIP": - return DuplicationFailMode_FAIL_SKIP, nil - case "FAIL_FAST": - return DuplicationFailMode_FAIL_FAST, nil - } - return DuplicationFailMode(0), fmt.Errorf("not a valid DuplicationFailMode string") -} - -func DuplicationFailModePtr(v DuplicationFailMode) *DuplicationFailMode { return &v } - -func (p DuplicationFailMode) MarshalText() ([]byte, error) { - return []byte(p.String()), nil -} - -func (p *DuplicationFailMode) UnmarshalText(text []byte) error { - q, err := DuplicationFailModeFromString(string(text)) - if err != nil { - return err - } - *p = q - return nil -} - -func (p *DuplicationFailMode) Scan(value interface{}) error { - v, ok := value.(int64) - if !ok { - return errors.New("Scan value is not int64") - } - *p = DuplicationFailMode(v) - return nil -} - -func (p *DuplicationFailMode) Value() (driver.Value, error) { - if p == nil { - return nil, nil - } - return int64(*p), nil -} - -type ConfigType int64 - -const ( - ConfigType_CT_INVALID ConfigType = 0 - ConfigType_CT_ASSIGN_PRIMARY 
ConfigType = 1 - ConfigType_CT_UPGRADE_TO_PRIMARY ConfigType = 2 - ConfigType_CT_ADD_SECONDARY ConfigType = 3 - ConfigType_CT_UPGRADE_TO_SECONDARY ConfigType = 4 - ConfigType_CT_DOWNGRADE_TO_SECONDARY ConfigType = 5 - ConfigType_CT_DOWNGRADE_TO_INACTIVE ConfigType = 6 - ConfigType_CT_REMOVE ConfigType = 7 - ConfigType_CT_ADD_SECONDARY_FOR_LB ConfigType = 8 - ConfigType_CT_PRIMARY_FORCE_UPDATE_BALLOT ConfigType = 9 - ConfigType_CT_DROP_PARTITION ConfigType = 10 - ConfigType_CT_REGISTER_CHILD ConfigType = 11 -) - -func (p ConfigType) String() string { - switch p { - case ConfigType_CT_INVALID: - return "CT_INVALID" - case ConfigType_CT_ASSIGN_PRIMARY: - return "CT_ASSIGN_PRIMARY" - case ConfigType_CT_UPGRADE_TO_PRIMARY: - return "CT_UPGRADE_TO_PRIMARY" - case ConfigType_CT_ADD_SECONDARY: - return "CT_ADD_SECONDARY" - case ConfigType_CT_UPGRADE_TO_SECONDARY: - return "CT_UPGRADE_TO_SECONDARY" - case ConfigType_CT_DOWNGRADE_TO_SECONDARY: - return "CT_DOWNGRADE_TO_SECONDARY" - case ConfigType_CT_DOWNGRADE_TO_INACTIVE: - return "CT_DOWNGRADE_TO_INACTIVE" - case ConfigType_CT_REMOVE: - return "CT_REMOVE" - case ConfigType_CT_ADD_SECONDARY_FOR_LB: - return "CT_ADD_SECONDARY_FOR_LB" - case ConfigType_CT_PRIMARY_FORCE_UPDATE_BALLOT: - return "CT_PRIMARY_FORCE_UPDATE_BALLOT" - case ConfigType_CT_DROP_PARTITION: - return "CT_DROP_PARTITION" - case ConfigType_CT_REGISTER_CHILD: - return "CT_REGISTER_CHILD" - } - return "" -} - -func ConfigTypeFromString(s string) (ConfigType, error) { - switch s { - case "CT_INVALID": - return ConfigType_CT_INVALID, nil - case "CT_ASSIGN_PRIMARY": - return ConfigType_CT_ASSIGN_PRIMARY, nil - case "CT_UPGRADE_TO_PRIMARY": - return ConfigType_CT_UPGRADE_TO_PRIMARY, nil - case "CT_ADD_SECONDARY": - return ConfigType_CT_ADD_SECONDARY, nil - case "CT_UPGRADE_TO_SECONDARY": - return ConfigType_CT_UPGRADE_TO_SECONDARY, nil - case "CT_DOWNGRADE_TO_SECONDARY": - return ConfigType_CT_DOWNGRADE_TO_SECONDARY, nil - case "CT_DOWNGRADE_TO_INACTIVE": - return ConfigType_CT_DOWNGRADE_TO_INACTIVE, nil - case "CT_REMOVE": - return ConfigType_CT_REMOVE, nil - case "CT_ADD_SECONDARY_FOR_LB": - return ConfigType_CT_ADD_SECONDARY_FOR_LB, nil - case "CT_PRIMARY_FORCE_UPDATE_BALLOT": - return ConfigType_CT_PRIMARY_FORCE_UPDATE_BALLOT, nil - case "CT_DROP_PARTITION": - return ConfigType_CT_DROP_PARTITION, nil - case "CT_REGISTER_CHILD": - return ConfigType_CT_REGISTER_CHILD, nil - } - return ConfigType(0), fmt.Errorf("not a valid ConfigType string") -} - -func ConfigTypePtr(v ConfigType) *ConfigType { return &v } - -func (p ConfigType) MarshalText() ([]byte, error) { - return []byte(p.String()), nil -} - -func (p *ConfigType) UnmarshalText(text []byte) error { - q, err := ConfigTypeFromString(string(text)) - if err != nil { - return err - } - *p = q - return nil -} - -func (p *ConfigType) Scan(value interface{}) error { - v, ok := value.(int64) - if !ok { - return errors.New("Scan value is not int64") - } - *p = ConfigType(v) - return nil -} - -func (p *ConfigType) Value() (driver.Value, error) { - if p == nil { - return nil, nil - } - return int64(*p), nil -} - -type BalancerRequestType int64 - -const ( - BalancerRequestType_move_primary BalancerRequestType = 0 - BalancerRequestType_copy_primary BalancerRequestType = 1 - BalancerRequestType_copy_secondary BalancerRequestType = 2 -) - -func (p BalancerRequestType) String() string { - switch p { - case BalancerRequestType_move_primary: - return "move_primary" - case BalancerRequestType_copy_primary: - return "copy_primary" - case 
BalancerRequestType_copy_secondary: - return "copy_secondary" - } - return "" -} - -func BalancerRequestTypeFromString(s string) (BalancerRequestType, error) { - switch s { - case "move_primary": - return BalancerRequestType_move_primary, nil - case "copy_primary": - return BalancerRequestType_copy_primary, nil - case "copy_secondary": - return BalancerRequestType_copy_secondary, nil - } - return BalancerRequestType(0), fmt.Errorf("not a valid BalancerRequestType string") -} - -func BalancerRequestTypePtr(v BalancerRequestType) *BalancerRequestType { return &v } - -func (p BalancerRequestType) MarshalText() ([]byte, error) { - return []byte(p.String()), nil -} - -func (p *BalancerRequestType) UnmarshalText(text []byte) error { - q, err := BalancerRequestTypeFromString(string(text)) - if err != nil { - return err - } - *p = q - return nil -} - -func (p *BalancerRequestType) Scan(value interface{}) error { - v, ok := value.(int64) - if !ok { - return errors.New("Scan value is not int64") - } - *p = BalancerRequestType(v) - return nil -} - -func (p *BalancerRequestType) Value() (driver.Value, error) { - if p == nil { - return nil, nil - } - return int64(*p), nil -} - -// Attributes: -// - PartitionCount -// - ReplicaCount -// - SuccessIfExist -// - AppType -// - IsStateful -// - Envs -type CreateAppOptions struct { - PartitionCount int32 `thrift:"partition_count,1" db:"partition_count" json:"partition_count"` - ReplicaCount int32 `thrift:"replica_count,2" db:"replica_count" json:"replica_count"` - SuccessIfExist bool `thrift:"success_if_exist,3" db:"success_if_exist" json:"success_if_exist"` - AppType string `thrift:"app_type,4" db:"app_type" json:"app_type"` - IsStateful bool `thrift:"is_stateful,5" db:"is_stateful" json:"is_stateful"` - Envs map[string]string `thrift:"envs,6" db:"envs" json:"envs"` -} - -func NewCreateAppOptions() *CreateAppOptions { - return &CreateAppOptions{} -} - -func (p *CreateAppOptions) GetPartitionCount() int32 { - return p.PartitionCount -} - -func (p *CreateAppOptions) GetReplicaCount() int32 { - return p.ReplicaCount -} - -func (p *CreateAppOptions) GetSuccessIfExist() bool { - return p.SuccessIfExist -} - -func (p *CreateAppOptions) GetAppType() string { - return p.AppType -} - -func (p *CreateAppOptions) GetIsStateful() bool { - return p.IsStateful -} - -func (p *CreateAppOptions) GetEnvs() map[string]string { - return p.Envs -} -func (p *CreateAppOptions) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.I32 { - if err := p.ReadField1(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 2: - if fieldTypeId == thrift.I32 { - if err := p.ReadField2(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 3: - if fieldTypeId == thrift.BOOL { - if err := p.ReadField3(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 4: - if fieldTypeId == thrift.STRING { - if err := p.ReadField4(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); 
err != nil { - return err - } - } - case 5: - if fieldTypeId == thrift.BOOL { - if err := p.ReadField5(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 6: - if fieldTypeId == thrift.MAP { - if err := p.ReadField6(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *CreateAppOptions) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.PartitionCount = v - } - return nil -} - -func (p *CreateAppOptions) ReadField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 2: ", err) - } else { - p.ReplicaCount = v - } - return nil -} - -func (p *CreateAppOptions) ReadField3(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { - return thrift.PrependError("error reading field 3: ", err) - } else { - p.SuccessIfExist = v - } - return nil -} - -func (p *CreateAppOptions) ReadField4(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 4: ", err) - } else { - p.AppType = v - } - return nil -} - -func (p *CreateAppOptions) ReadField5(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { - return thrift.PrependError("error reading field 5: ", err) - } else { - p.IsStateful = v - } - return nil -} - -func (p *CreateAppOptions) ReadField6(iprot thrift.TProtocol) error { - _, _, size, err := iprot.ReadMapBegin() - if err != nil { - return thrift.PrependError("error reading map begin: ", err) - } - tMap := make(map[string]string, size) - p.Envs = tMap - for i := 0; i < size; i++ { - var _key0 string - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 0: ", err) - } else { - _key0 = v - } - var _val1 string - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 0: ", err) - } else { - _val1 = v - } - p.Envs[_key0] = _val1 - } - if err := iprot.ReadMapEnd(); err != nil { - return thrift.PrependError("error reading map end: ", err) - } - return nil -} - -func (p *CreateAppOptions) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("create_app_options"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - if err := p.writeField3(oprot); err != nil { - return err - } - if err := p.writeField4(oprot); err != nil { - return err - } - if err := p.writeField5(oprot); err != nil { - return err - } - if err := p.writeField6(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *CreateAppOptions) writeField1(oprot 
thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("partition_count", thrift.I32, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:partition_count: ", p), err) - } - if err := oprot.WriteI32(int32(p.PartitionCount)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.partition_count (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:partition_count: ", p), err) - } - return err -} - -func (p *CreateAppOptions) writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("replica_count", thrift.I32, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:replica_count: ", p), err) - } - if err := oprot.WriteI32(int32(p.ReplicaCount)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.replica_count (2) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:replica_count: ", p), err) - } - return err -} - -func (p *CreateAppOptions) writeField3(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("success_if_exist", thrift.BOOL, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:success_if_exist: ", p), err) - } - if err := oprot.WriteBool(bool(p.SuccessIfExist)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.success_if_exist (3) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:success_if_exist: ", p), err) - } - return err -} - -func (p *CreateAppOptions) writeField4(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("app_type", thrift.STRING, 4); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:app_type: ", p), err) - } - if err := oprot.WriteString(string(p.AppType)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.app_type (4) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 4:app_type: ", p), err) - } - return err -} - -func (p *CreateAppOptions) writeField5(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("is_stateful", thrift.BOOL, 5); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:is_stateful: ", p), err) - } - if err := oprot.WriteBool(bool(p.IsStateful)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.is_stateful (5) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 5:is_stateful: ", p), err) - } - return err -} - -func (p *CreateAppOptions) writeField6(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("envs", thrift.MAP, 6); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:envs: ", p), err) - } - if err := oprot.WriteMapBegin(thrift.STRING, thrift.STRING, len(p.Envs)); err != nil { - return thrift.PrependError("error writing map begin: ", err) - } - for k, v := range p.Envs { - if err := oprot.WriteString(string(k)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T. 
(0) field write error: ", p), err) - } - if err := oprot.WriteString(string(v)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err) - } - } - if err := oprot.WriteMapEnd(); err != nil { - return thrift.PrependError("error writing map end: ", err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 6:envs: ", p), err) - } - return err -} - -func (p *CreateAppOptions) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("CreateAppOptions(%+v)", *p) -} - -// Attributes: -// - AppName -// - Options -type CreateAppRequest struct { - AppName string `thrift:"app_name,1" db:"app_name" json:"app_name"` - Options *CreateAppOptions `thrift:"options,2" db:"options" json:"options"` -} - -func NewCreateAppRequest() *CreateAppRequest { - return &CreateAppRequest{} -} - -func (p *CreateAppRequest) GetAppName() string { - return p.AppName -} - -var CreateAppRequest_Options_DEFAULT *CreateAppOptions - -func (p *CreateAppRequest) GetOptions() *CreateAppOptions { - if !p.IsSetOptions() { - return CreateAppRequest_Options_DEFAULT - } - return p.Options -} -func (p *CreateAppRequest) IsSetOptions() bool { - return p.Options != nil -} - -func (p *CreateAppRequest) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.STRING { - if err := p.ReadField1(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 2: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField2(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *CreateAppRequest) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.AppName = v - } - return nil -} - -func (p *CreateAppRequest) ReadField2(iprot thrift.TProtocol) error { - p.Options = &CreateAppOptions{} - if err := p.Options.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Options), err) - } - return nil -} - -func (p *CreateAppRequest) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("create_app_request"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *CreateAppRequest) writeField1(oprot thrift.TProtocol) (err 
error) { - if err := oprot.WriteFieldBegin("app_name", thrift.STRING, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:app_name: ", p), err) - } - if err := oprot.WriteString(string(p.AppName)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.app_name (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:app_name: ", p), err) - } - return err -} - -func (p *CreateAppRequest) writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("options", thrift.STRUCT, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:options: ", p), err) - } - if err := p.Options.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Options), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:options: ", p), err) - } - return err -} - -func (p *CreateAppRequest) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("CreateAppRequest(%+v)", *p) -} - -// Attributes: -// - Err -// - Appid -type CreateAppResponse struct { - Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` - Appid int32 `thrift:"appid,2" db:"appid" json:"appid"` -} - -func NewCreateAppResponse() *CreateAppResponse { - return &CreateAppResponse{} -} - -var CreateAppResponse_Err_DEFAULT *base.ErrorCode - -func (p *CreateAppResponse) GetErr() *base.ErrorCode { - if !p.IsSetErr() { - return CreateAppResponse_Err_DEFAULT - } - return p.Err -} - -func (p *CreateAppResponse) GetAppid() int32 { - return p.Appid -} -func (p *CreateAppResponse) IsSetErr() bool { - return p.Err != nil -} - -func (p *CreateAppResponse) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField1(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 2: - if fieldTypeId == thrift.I32 { - if err := p.ReadField2(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *CreateAppResponse) ReadField1(iprot thrift.TProtocol) error { - p.Err = &base.ErrorCode{} - if err := p.Err.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) - } - return nil -} - -func (p *CreateAppResponse) ReadField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 2: ", err) - } else { - p.Appid = v - } - return nil -} - -func (p *CreateAppResponse) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("create_app_response"); err != nil { - return 
thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *CreateAppResponse) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) - } - if err := p.Err.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) - } - return err -} - -func (p *CreateAppResponse) writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("appid", thrift.I32, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:appid: ", p), err) - } - if err := oprot.WriteI32(int32(p.Appid)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.appid (2) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:appid: ", p), err) - } - return err -} - -func (p *CreateAppResponse) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("CreateAppResponse(%+v)", *p) -} - -// Attributes: -// - SuccessIfNotExist -// - ReserveSeconds -type DropAppOptions struct { - SuccessIfNotExist bool `thrift:"success_if_not_exist,1" db:"success_if_not_exist" json:"success_if_not_exist"` - ReserveSeconds *int64 `thrift:"reserve_seconds,2" db:"reserve_seconds" json:"reserve_seconds,omitempty"` -} - -func NewDropAppOptions() *DropAppOptions { - return &DropAppOptions{} -} - -func (p *DropAppOptions) GetSuccessIfNotExist() bool { - return p.SuccessIfNotExist -} - -var DropAppOptions_ReserveSeconds_DEFAULT int64 - -func (p *DropAppOptions) GetReserveSeconds() int64 { - if !p.IsSetReserveSeconds() { - return DropAppOptions_ReserveSeconds_DEFAULT - } - return *p.ReserveSeconds -} -func (p *DropAppOptions) IsSetReserveSeconds() bool { - return p.ReserveSeconds != nil -} - -func (p *DropAppOptions) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.BOOL { - if err := p.ReadField1(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 2: - if fieldTypeId == thrift.I64 { - if err := p.ReadField2(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end 
error: ", p), err) - } - return nil -} - -func (p *DropAppOptions) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.SuccessIfNotExist = v - } - return nil -} - -func (p *DropAppOptions) ReadField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return thrift.PrependError("error reading field 2: ", err) - } else { - p.ReserveSeconds = &v - } - return nil -} - -func (p *DropAppOptions) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("drop_app_options"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *DropAppOptions) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("success_if_not_exist", thrift.BOOL, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:success_if_not_exist: ", p), err) - } - if err := oprot.WriteBool(bool(p.SuccessIfNotExist)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.success_if_not_exist (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:success_if_not_exist: ", p), err) - } - return err -} - -func (p *DropAppOptions) writeField2(oprot thrift.TProtocol) (err error) { - if p.IsSetReserveSeconds() { - if err := oprot.WriteFieldBegin("reserve_seconds", thrift.I64, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:reserve_seconds: ", p), err) - } - if err := oprot.WriteI64(int64(*p.ReserveSeconds)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.reserve_seconds (2) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:reserve_seconds: ", p), err) - } - } - return err -} - -func (p *DropAppOptions) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("DropAppOptions(%+v)", *p) -} - -// Attributes: -// - AppName -// - Options -type DropAppRequest struct { - AppName string `thrift:"app_name,1" db:"app_name" json:"app_name"` - Options *DropAppOptions `thrift:"options,2" db:"options" json:"options"` -} - -func NewDropAppRequest() *DropAppRequest { - return &DropAppRequest{} -} - -func (p *DropAppRequest) GetAppName() string { - return p.AppName -} - -var DropAppRequest_Options_DEFAULT *DropAppOptions - -func (p *DropAppRequest) GetOptions() *DropAppOptions { - if !p.IsSetOptions() { - return DropAppRequest_Options_DEFAULT - } - return p.Options -} -func (p *DropAppRequest) IsSetOptions() bool { - return p.Options != nil -} - -func (p *DropAppRequest) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP 
{ - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.STRING { - if err := p.ReadField1(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 2: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField2(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *DropAppRequest) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.AppName = v - } - return nil -} - -func (p *DropAppRequest) ReadField2(iprot thrift.TProtocol) error { - p.Options = &DropAppOptions{} - if err := p.Options.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Options), err) - } - return nil -} - -func (p *DropAppRequest) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("drop_app_request"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *DropAppRequest) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("app_name", thrift.STRING, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:app_name: ", p), err) - } - if err := oprot.WriteString(string(p.AppName)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.app_name (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:app_name: ", p), err) - } - return err -} - -func (p *DropAppRequest) writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("options", thrift.STRUCT, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:options: ", p), err) - } - if err := p.Options.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Options), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:options: ", p), err) - } - return err -} - -func (p *DropAppRequest) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("DropAppRequest(%+v)", *p) -} - -// Attributes: -// - Err -type DropAppResponse struct { - Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` -} - -func NewDropAppResponse() *DropAppResponse { - return &DropAppResponse{} -} - -var DropAppResponse_Err_DEFAULT *base.ErrorCode - -func (p *DropAppResponse) GetErr() *base.ErrorCode { - if !p.IsSetErr() { - return DropAppResponse_Err_DEFAULT - } - return p.Err -} -func (p *DropAppResponse) IsSetErr() bool { - return p.Err != nil -} - -func (p *DropAppResponse) 
Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField1(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *DropAppResponse) ReadField1(iprot thrift.TProtocol) error { - p.Err = &base.ErrorCode{} - if err := p.Err.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) - } - return nil -} - -func (p *DropAppResponse) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("drop_app_response"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *DropAppResponse) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) - } - if err := p.Err.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) - } - return err -} - -func (p *DropAppResponse) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("DropAppResponse(%+v)", *p) -} - -// Attributes: -// - AppID -// - NewAppName_ -type RecallAppRequest struct { - AppID int32 `thrift:"app_id,1" db:"app_id" json:"app_id"` - NewAppName_ string `thrift:"new_app_name,2" db:"new_app_name" json:"new_app_name"` -} - -func NewRecallAppRequest() *RecallAppRequest { - return &RecallAppRequest{} -} - -func (p *RecallAppRequest) GetAppID() int32 { - return p.AppID -} - -func (p *RecallAppRequest) GetNewAppName_() string { - return p.NewAppName_ -} -func (p *RecallAppRequest) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.I32 { - if err := p.ReadField1(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 2: - if fieldTypeId == thrift.STRING { - if err := p.ReadField2(iprot); err != nil { - return err - } - } else 
{ - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *RecallAppRequest) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.AppID = v - } - return nil -} - -func (p *RecallAppRequest) ReadField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 2: ", err) - } else { - p.NewAppName_ = v - } - return nil -} - -func (p *RecallAppRequest) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("recall_app_request"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *RecallAppRequest) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("app_id", thrift.I32, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:app_id: ", p), err) - } - if err := oprot.WriteI32(int32(p.AppID)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.app_id (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:app_id: ", p), err) - } - return err -} - -func (p *RecallAppRequest) writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("new_app_name", thrift.STRING, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:new_app_name: ", p), err) - } - if err := oprot.WriteString(string(p.NewAppName_)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.new_app_name (2) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:new_app_name: ", p), err) - } - return err -} - -func (p *RecallAppRequest) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("RecallAppRequest(%+v)", *p) -} - -// Attributes: -// - Err -// - Info -type RecallAppResponse struct { - Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` - Info *AppInfo `thrift:"info,2" db:"info" json:"info"` -} - -func NewRecallAppResponse() *RecallAppResponse { - return &RecallAppResponse{} -} - -var RecallAppResponse_Err_DEFAULT *base.ErrorCode - -func (p *RecallAppResponse) GetErr() *base.ErrorCode { - if !p.IsSetErr() { - return RecallAppResponse_Err_DEFAULT - } - return p.Err -} - -var RecallAppResponse_Info_DEFAULT *AppInfo - -func (p *RecallAppResponse) GetInfo() *AppInfo { - if !p.IsSetInfo() { - return RecallAppResponse_Info_DEFAULT - } - return p.Info -} -func (p *RecallAppResponse) IsSetErr() bool { - return p.Err != nil -} - -func (p *RecallAppResponse) IsSetInfo() bool { - return p.Info != nil -} - -func (p 
*RecallAppResponse) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField1(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 2: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField2(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *RecallAppResponse) ReadField1(iprot thrift.TProtocol) error { - p.Err = &base.ErrorCode{} - if err := p.Err.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) - } - return nil -} - -func (p *RecallAppResponse) ReadField2(iprot thrift.TProtocol) error { - p.Info = &AppInfo{ - Status: 0, - - InitPartitionCount: -1, - } - if err := p.Info.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Info), err) - } - return nil -} - -func (p *RecallAppResponse) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("recall_app_response"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *RecallAppResponse) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) - } - if err := p.Err.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) - } - return err -} - -func (p *RecallAppResponse) writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("info", thrift.STRUCT, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:info: ", p), err) - } - if err := p.Info.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Info), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:info: ", p), err) - } - return err -} - -func (p *RecallAppResponse) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("RecallAppResponse(%+v)", *p) -} - -// Attributes: -// - Status -// - AppType -// - AppName -// - AppID -// - 
PartitionCount -// - Envs -// - IsStateful -// - MaxReplicaCount -// - ExpireSecond -// - CreateSecond -// - DropSecond -// - Duplicating -// - InitPartitionCount -// - IsBulkLoading -type AppInfo struct { - Status AppStatus `thrift:"status,1" db:"status" json:"status"` - AppType string `thrift:"app_type,2" db:"app_type" json:"app_type"` - AppName string `thrift:"app_name,3" db:"app_name" json:"app_name"` - AppID int32 `thrift:"app_id,4" db:"app_id" json:"app_id"` - PartitionCount int32 `thrift:"partition_count,5" db:"partition_count" json:"partition_count"` - Envs map[string]string `thrift:"envs,6" db:"envs" json:"envs"` - IsStateful bool `thrift:"is_stateful,7" db:"is_stateful" json:"is_stateful"` - MaxReplicaCount int32 `thrift:"max_replica_count,8" db:"max_replica_count" json:"max_replica_count"` - ExpireSecond int64 `thrift:"expire_second,9" db:"expire_second" json:"expire_second"` - CreateSecond int64 `thrift:"create_second,10" db:"create_second" json:"create_second"` - DropSecond int64 `thrift:"drop_second,11" db:"drop_second" json:"drop_second"` - Duplicating *bool `thrift:"duplicating,12" db:"duplicating" json:"duplicating,omitempty"` - InitPartitionCount int32 `thrift:"init_partition_count,13" db:"init_partition_count" json:"init_partition_count"` - IsBulkLoading bool `thrift:"is_bulk_loading,14" db:"is_bulk_loading" json:"is_bulk_loading"` -} - -func NewAppInfo() *AppInfo { - return &AppInfo{ - Status: 0, - - InitPartitionCount: -1, - } -} - -func (p *AppInfo) GetStatus() AppStatus { - return p.Status -} - -func (p *AppInfo) GetAppType() string { - return p.AppType -} - -func (p *AppInfo) GetAppName() string { - return p.AppName -} - -func (p *AppInfo) GetAppID() int32 { - return p.AppID -} - -func (p *AppInfo) GetPartitionCount() int32 { - return p.PartitionCount -} - -func (p *AppInfo) GetEnvs() map[string]string { - return p.Envs -} - -func (p *AppInfo) GetIsStateful() bool { - return p.IsStateful -} - -func (p *AppInfo) GetMaxReplicaCount() int32 { - return p.MaxReplicaCount -} - -func (p *AppInfo) GetExpireSecond() int64 { - return p.ExpireSecond -} - -func (p *AppInfo) GetCreateSecond() int64 { - return p.CreateSecond -} - -func (p *AppInfo) GetDropSecond() int64 { - return p.DropSecond -} - -var AppInfo_Duplicating_DEFAULT bool - -func (p *AppInfo) GetDuplicating() bool { - if !p.IsSetDuplicating() { - return AppInfo_Duplicating_DEFAULT - } - return *p.Duplicating -} - -func (p *AppInfo) GetInitPartitionCount() int32 { - return p.InitPartitionCount -} - -var AppInfo_IsBulkLoading_DEFAULT bool = false - -func (p *AppInfo) GetIsBulkLoading() bool { - return p.IsBulkLoading -} -func (p *AppInfo) IsSetDuplicating() bool { - return p.Duplicating != nil -} - -func (p *AppInfo) IsSetIsBulkLoading() bool { - return p.IsBulkLoading != AppInfo_IsBulkLoading_DEFAULT -} - -func (p *AppInfo) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.I32 { - if err := p.ReadField1(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 2: - if fieldTypeId == thrift.STRING { - if err := p.ReadField2(iprot); err != nil { - return err - } - } 
else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 3: - if fieldTypeId == thrift.STRING { - if err := p.ReadField3(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 4: - if fieldTypeId == thrift.I32 { - if err := p.ReadField4(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 5: - if fieldTypeId == thrift.I32 { - if err := p.ReadField5(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 6: - if fieldTypeId == thrift.MAP { - if err := p.ReadField6(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 7: - if fieldTypeId == thrift.BOOL { - if err := p.ReadField7(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 8: - if fieldTypeId == thrift.I32 { - if err := p.ReadField8(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 9: - if fieldTypeId == thrift.I64 { - if err := p.ReadField9(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 10: - if fieldTypeId == thrift.I64 { - if err := p.ReadField10(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 11: - if fieldTypeId == thrift.I64 { - if err := p.ReadField11(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 12: - if fieldTypeId == thrift.BOOL { - if err := p.ReadField12(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 13: - if fieldTypeId == thrift.I32 { - if err := p.ReadField13(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 14: - if fieldTypeId == thrift.BOOL { - if err := p.ReadField14(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *AppInfo) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - temp := AppStatus(v) - p.Status = temp - } - return nil -} - -func (p *AppInfo) ReadField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 2: ", err) - } else { - p.AppType = v - } - return nil -} - -func (p *AppInfo) ReadField3(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 3: ", err) - } else { - p.AppName = v - } - return nil -} - -func (p *AppInfo) ReadField4(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 4: ", err) - } else { - p.AppID = v - } - return nil -} - -func 
(p *AppInfo) ReadField5(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 5: ", err) - } else { - p.PartitionCount = v - } - return nil -} - -func (p *AppInfo) ReadField6(iprot thrift.TProtocol) error { - _, _, size, err := iprot.ReadMapBegin() - if err != nil { - return thrift.PrependError("error reading map begin: ", err) - } - tMap := make(map[string]string, size) - p.Envs = tMap - for i := 0; i < size; i++ { - var _key2 string - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 0: ", err) - } else { - _key2 = v - } - var _val3 string - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 0: ", err) - } else { - _val3 = v - } - p.Envs[_key2] = _val3 - } - if err := iprot.ReadMapEnd(); err != nil { - return thrift.PrependError("error reading map end: ", err) - } - return nil -} - -func (p *AppInfo) ReadField7(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { - return thrift.PrependError("error reading field 7: ", err) - } else { - p.IsStateful = v - } - return nil -} - -func (p *AppInfo) ReadField8(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 8: ", err) - } else { - p.MaxReplicaCount = v - } - return nil -} - -func (p *AppInfo) ReadField9(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return thrift.PrependError("error reading field 9: ", err) - } else { - p.ExpireSecond = v - } - return nil -} - -func (p *AppInfo) ReadField10(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return thrift.PrependError("error reading field 10: ", err) - } else { - p.CreateSecond = v - } - return nil -} - -func (p *AppInfo) ReadField11(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return thrift.PrependError("error reading field 11: ", err) - } else { - p.DropSecond = v - } - return nil -} - -func (p *AppInfo) ReadField12(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { - return thrift.PrependError("error reading field 12: ", err) - } else { - p.Duplicating = &v - } - return nil -} - -func (p *AppInfo) ReadField13(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 13: ", err) - } else { - p.InitPartitionCount = v - } - return nil -} - -func (p *AppInfo) ReadField14(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { - return thrift.PrependError("error reading field 14: ", err) - } else { - p.IsBulkLoading = v - } - return nil -} - -func (p *AppInfo) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("app_info"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - if err := p.writeField3(oprot); err != nil { - return err - } - if err := p.writeField4(oprot); err != nil { - return err - } - if err := p.writeField5(oprot); err != nil { - return err - } - if err := p.writeField6(oprot); err != nil { - return err - } - if err := p.writeField7(oprot); err != nil { - return err - } - if err := p.writeField8(oprot); err != nil { - return err - } - if err := p.writeField9(oprot); err != nil { - return err - } - 
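// Illustrative usage sketch (hand-written, not thrift-generated; assumes it sits
// alongside the generated AppInfo type above). NewAppInfo pre-fills the declared
// defaults (Status: 0, InitPartitionCount: -1); the optional duplicating field is
// a pointer and is only serialized when non-nil (see IsSetDuplicating), while
// is_bulk_loading is only serialized when it differs from its false default
// (see IsSetIsBulkLoading). All field values below are examples only.
func exampleAppInfo() *AppInfo {
	info := NewAppInfo() // Status: 0, InitPartitionCount: -1
	info.AppName = "temp"
	info.AppType = "pegasus"
	info.AppID = 1
	info.PartitionCount = 8
	info.MaxReplicaCount = 3
	info.Envs = map[string]string{}

	duplicating := true
	info.Duplicating = &duplicating // optional field: omitted from the wire when nil
	return info
}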
if err := p.writeField10(oprot); err != nil { - return err - } - if err := p.writeField11(oprot); err != nil { - return err - } - if err := p.writeField12(oprot); err != nil { - return err - } - if err := p.writeField13(oprot); err != nil { - return err - } - if err := p.writeField14(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *AppInfo) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("status", thrift.I32, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:status: ", p), err) - } - if err := oprot.WriteI32(int32(p.Status)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.status (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:status: ", p), err) - } - return err -} - -func (p *AppInfo) writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("app_type", thrift.STRING, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:app_type: ", p), err) - } - if err := oprot.WriteString(string(p.AppType)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.app_type (2) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:app_type: ", p), err) - } - return err -} - -func (p *AppInfo) writeField3(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("app_name", thrift.STRING, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:app_name: ", p), err) - } - if err := oprot.WriteString(string(p.AppName)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.app_name (3) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:app_name: ", p), err) - } - return err -} - -func (p *AppInfo) writeField4(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("app_id", thrift.I32, 4); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:app_id: ", p), err) - } - if err := oprot.WriteI32(int32(p.AppID)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.app_id (4) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 4:app_id: ", p), err) - } - return err -} - -func (p *AppInfo) writeField5(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("partition_count", thrift.I32, 5); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:partition_count: ", p), err) - } - if err := oprot.WriteI32(int32(p.PartitionCount)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.partition_count (5) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 5:partition_count: ", p), err) - } - return err -} - -func (p *AppInfo) writeField6(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("envs", thrift.MAP, 6); err != nil { - return 
thrift.PrependError(fmt.Sprintf("%T write field begin error 6:envs: ", p), err) - } - if err := oprot.WriteMapBegin(thrift.STRING, thrift.STRING, len(p.Envs)); err != nil { - return thrift.PrependError("error writing map begin: ", err) - } - for k, v := range p.Envs { - if err := oprot.WriteString(string(k)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err) - } - if err := oprot.WriteString(string(v)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err) - } - } - if err := oprot.WriteMapEnd(); err != nil { - return thrift.PrependError("error writing map end: ", err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 6:envs: ", p), err) - } - return err -} - -func (p *AppInfo) writeField7(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("is_stateful", thrift.BOOL, 7); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:is_stateful: ", p), err) - } - if err := oprot.WriteBool(bool(p.IsStateful)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.is_stateful (7) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 7:is_stateful: ", p), err) - } - return err -} - -func (p *AppInfo) writeField8(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("max_replica_count", thrift.I32, 8); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:max_replica_count: ", p), err) - } - if err := oprot.WriteI32(int32(p.MaxReplicaCount)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.max_replica_count (8) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 8:max_replica_count: ", p), err) - } - return err -} - -func (p *AppInfo) writeField9(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("expire_second", thrift.I64, 9); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 9:expire_second: ", p), err) - } - if err := oprot.WriteI64(int64(p.ExpireSecond)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.expire_second (9) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 9:expire_second: ", p), err) - } - return err -} - -func (p *AppInfo) writeField10(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("create_second", thrift.I64, 10); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 10:create_second: ", p), err) - } - if err := oprot.WriteI64(int64(p.CreateSecond)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.create_second (10) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 10:create_second: ", p), err) - } - return err -} - -func (p *AppInfo) writeField11(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("drop_second", thrift.I64, 11); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 11:drop_second: ", p), err) - } - if err := oprot.WriteI64(int64(p.DropSecond)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.drop_second (11) field write error: ", p), 
err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 11:drop_second: ", p), err) - } - return err -} - -func (p *AppInfo) writeField12(oprot thrift.TProtocol) (err error) { - if p.IsSetDuplicating() { - if err := oprot.WriteFieldBegin("duplicating", thrift.BOOL, 12); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 12:duplicating: ", p), err) - } - if err := oprot.WriteBool(bool(*p.Duplicating)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.duplicating (12) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 12:duplicating: ", p), err) - } - } - return err -} - -func (p *AppInfo) writeField13(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("init_partition_count", thrift.I32, 13); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 13:init_partition_count: ", p), err) - } - if err := oprot.WriteI32(int32(p.InitPartitionCount)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.init_partition_count (13) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 13:init_partition_count: ", p), err) - } - return err -} - -func (p *AppInfo) writeField14(oprot thrift.TProtocol) (err error) { - if p.IsSetIsBulkLoading() { - if err := oprot.WriteFieldBegin("is_bulk_loading", thrift.BOOL, 14); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 14:is_bulk_loading: ", p), err) - } - if err := oprot.WriteBool(bool(p.IsBulkLoading)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.is_bulk_loading (14) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 14:is_bulk_loading: ", p), err) - } - } - return err -} - -func (p *AppInfo) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("AppInfo(%+v)", *p) -} - -// Attributes: -// - Status -type ListAppsRequest struct { - Status AppStatus `thrift:"status,1" db:"status" json:"status"` -} - -func NewListAppsRequest() *ListAppsRequest { - return &ListAppsRequest{ - Status: 0, - } -} - -func (p *ListAppsRequest) GetStatus() AppStatus { - return p.Status -} -func (p *ListAppsRequest) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.I32 { - if err := p.ReadField1(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *ListAppsRequest) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - temp 
:= AppStatus(v) - p.Status = temp - } - return nil -} - -func (p *ListAppsRequest) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("list_apps_request"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *ListAppsRequest) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("status", thrift.I32, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:status: ", p), err) - } - if err := oprot.WriteI32(int32(p.Status)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.status (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:status: ", p), err) - } - return err -} - -func (p *ListAppsRequest) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("ListAppsRequest(%+v)", *p) -} - -// Attributes: -// - Err -// - Infos -type ListAppsResponse struct { - Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` - Infos []*AppInfo `thrift:"infos,2" db:"infos" json:"infos"` -} - -func NewListAppsResponse() *ListAppsResponse { - return &ListAppsResponse{} -} - -var ListAppsResponse_Err_DEFAULT *base.ErrorCode - -func (p *ListAppsResponse) GetErr() *base.ErrorCode { - if !p.IsSetErr() { - return ListAppsResponse_Err_DEFAULT - } - return p.Err -} - -func (p *ListAppsResponse) GetInfos() []*AppInfo { - return p.Infos -} -func (p *ListAppsResponse) IsSetErr() bool { - return p.Err != nil -} - -func (p *ListAppsResponse) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField1(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 2: - if fieldTypeId == thrift.LIST { - if err := p.ReadField2(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *ListAppsResponse) ReadField1(iprot thrift.TProtocol) error { - p.Err = &base.ErrorCode{} - if err := p.Err.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) - } - return nil -} - -func (p *ListAppsResponse) ReadField2(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*AppInfo, 0, size) - p.Infos = tSlice - for i := 0; i < size; i++ { - 
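// Illustrative usage sketch (hand-written, not thrift-generated; assumes it sits
// alongside the generated list_apps types above). A list_apps exchange pairs
// ListAppsRequest with ListAppsResponse, whose infos field is decoded into a
// slice of *AppInfo by ReadField2. The RPC layer that actually carries the
// request is out of scope here; the AppStatus value is an example only.
func exampleListAppNames(resp *ListAppsResponse) []string {
	req := NewListAppsRequest()
	req.Status = AppStatus(0) // filter by status; 0 is the declared default
	_ = req                   // sending the request over a connection is not shown

	names := make([]string, 0, len(resp.GetInfos()))
	for _, info := range resp.GetInfos() {
		names = append(names, info.GetAppName())
	}
	return names
}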
_elem4 := &AppInfo{ - Status: 0, - - InitPartitionCount: -1, - } - if err := _elem4.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem4), err) - } - p.Infos = append(p.Infos, _elem4) - } - if err := iprot.ReadListEnd(); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *ListAppsResponse) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("list_apps_response"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *ListAppsResponse) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) - } - if err := p.Err.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) - } - return err -} - -func (p *ListAppsResponse) writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("infos", thrift.LIST, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:infos: ", p), err) - } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Infos)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.Infos { - if err := v.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:infos: ", p), err) - } - return err -} - -func (p *ListAppsResponse) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("ListAppsResponse(%+v)", *p) -} - -// Attributes: -// - MetaServer -type QueryAppInfoRequest struct { - MetaServer *base.RPCAddress `thrift:"meta_server,1" db:"meta_server" json:"meta_server"` -} - -func NewQueryAppInfoRequest() *QueryAppInfoRequest { - return &QueryAppInfoRequest{} -} - -var QueryAppInfoRequest_MetaServer_DEFAULT *base.RPCAddress - -func (p *QueryAppInfoRequest) GetMetaServer() *base.RPCAddress { - if !p.IsSetMetaServer() { - return QueryAppInfoRequest_MetaServer_DEFAULT - } - return p.MetaServer -} -func (p *QueryAppInfoRequest) IsSetMetaServer() bool { - return p.MetaServer != nil -} - -func (p *QueryAppInfoRequest) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.STRUCT { - if err := 
p.ReadField1(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *QueryAppInfoRequest) ReadField1(iprot thrift.TProtocol) error { - p.MetaServer = &base.RPCAddress{} - if err := p.MetaServer.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.MetaServer), err) - } - return nil -} - -func (p *QueryAppInfoRequest) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("query_app_info_request"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *QueryAppInfoRequest) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("meta_server", thrift.STRUCT, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:meta_server: ", p), err) - } - if err := p.MetaServer.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.MetaServer), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:meta_server: ", p), err) - } - return err -} - -func (p *QueryAppInfoRequest) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("QueryAppInfoRequest(%+v)", *p) -} - -// Attributes: -// - Err -// - Apps -type QueryAppInfoResponse struct { - Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` - Apps []*AppInfo `thrift:"apps,2" db:"apps" json:"apps"` -} - -func NewQueryAppInfoResponse() *QueryAppInfoResponse { - return &QueryAppInfoResponse{} -} - -var QueryAppInfoResponse_Err_DEFAULT *base.ErrorCode - -func (p *QueryAppInfoResponse) GetErr() *base.ErrorCode { - if !p.IsSetErr() { - return QueryAppInfoResponse_Err_DEFAULT - } - return p.Err -} - -func (p *QueryAppInfoResponse) GetApps() []*AppInfo { - return p.Apps -} -func (p *QueryAppInfoResponse) IsSetErr() bool { - return p.Err != nil -} - -func (p *QueryAppInfoResponse) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField1(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 2: - if fieldTypeId == thrift.LIST { - if err := p.ReadField2(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); 
err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *QueryAppInfoResponse) ReadField1(iprot thrift.TProtocol) error { - p.Err = &base.ErrorCode{} - if err := p.Err.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) - } - return nil -} - -func (p *QueryAppInfoResponse) ReadField2(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*AppInfo, 0, size) - p.Apps = tSlice - for i := 0; i < size; i++ { - _elem5 := &AppInfo{ - Status: 0, - - InitPartitionCount: -1, - } - if err := _elem5.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem5), err) - } - p.Apps = append(p.Apps, _elem5) - } - if err := iprot.ReadListEnd(); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *QueryAppInfoResponse) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("query_app_info_response"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *QueryAppInfoResponse) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) - } - if err := p.Err.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) - } - return err -} - -func (p *QueryAppInfoResponse) writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("apps", thrift.LIST, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:apps: ", p), err) - } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Apps)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.Apps { - if err := v.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:apps: ", p), err) - } - return err -} - -func (p *QueryAppInfoResponse) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("QueryAppInfoResponse(%+v)", *p) -} - -// Attributes: -// - AppName -// - Op -// - Keys -// - Values -// - ClearPrefix -type UpdateAppEnvRequest struct { - AppName string `thrift:"app_name,1" db:"app_name" json:"app_name"` - Op AppEnvOperation `thrift:"op,2" db:"op" json:"op"` - Keys []string `thrift:"keys,3" db:"keys" json:"keys,omitempty"` - Values []string 
`thrift:"values,4" db:"values" json:"values,omitempty"` - ClearPrefix *string `thrift:"clear_prefix,5" db:"clear_prefix" json:"clear_prefix,omitempty"` -} - -func NewUpdateAppEnvRequest() *UpdateAppEnvRequest { - return &UpdateAppEnvRequest{ - Op: 0, - } -} - -func (p *UpdateAppEnvRequest) GetAppName() string { - return p.AppName -} - -func (p *UpdateAppEnvRequest) GetOp() AppEnvOperation { - return p.Op -} - -var UpdateAppEnvRequest_Keys_DEFAULT []string - -func (p *UpdateAppEnvRequest) GetKeys() []string { - return p.Keys -} - -var UpdateAppEnvRequest_Values_DEFAULT []string - -func (p *UpdateAppEnvRequest) GetValues() []string { - return p.Values -} - -var UpdateAppEnvRequest_ClearPrefix_DEFAULT string - -func (p *UpdateAppEnvRequest) GetClearPrefix() string { - if !p.IsSetClearPrefix() { - return UpdateAppEnvRequest_ClearPrefix_DEFAULT - } - return *p.ClearPrefix -} -func (p *UpdateAppEnvRequest) IsSetKeys() bool { - return p.Keys != nil -} - -func (p *UpdateAppEnvRequest) IsSetValues() bool { - return p.Values != nil -} - -func (p *UpdateAppEnvRequest) IsSetClearPrefix() bool { - return p.ClearPrefix != nil -} - -func (p *UpdateAppEnvRequest) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.STRING { - if err := p.ReadField1(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 2: - if fieldTypeId == thrift.I32 { - if err := p.ReadField2(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 3: - if fieldTypeId == thrift.LIST { - if err := p.ReadField3(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 4: - if fieldTypeId == thrift.LIST { - if err := p.ReadField4(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 5: - if fieldTypeId == thrift.STRING { - if err := p.ReadField5(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *UpdateAppEnvRequest) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.AppName = v - } - return nil -} - -func (p *UpdateAppEnvRequest) ReadField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 2: ", err) - } else { - temp := AppEnvOperation(v) - p.Op = temp - } - return nil -} - -func (p *UpdateAppEnvRequest) ReadField3(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]string, 0, size) - p.Keys = 
tSlice - for i := 0; i < size; i++ { - var _elem6 string - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 0: ", err) - } else { - _elem6 = v - } - p.Keys = append(p.Keys, _elem6) - } - if err := iprot.ReadListEnd(); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *UpdateAppEnvRequest) ReadField4(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]string, 0, size) - p.Values = tSlice - for i := 0; i < size; i++ { - var _elem7 string - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 0: ", err) - } else { - _elem7 = v - } - p.Values = append(p.Values, _elem7) - } - if err := iprot.ReadListEnd(); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *UpdateAppEnvRequest) ReadField5(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 5: ", err) - } else { - p.ClearPrefix = &v - } - return nil -} - -func (p *UpdateAppEnvRequest) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("update_app_env_request"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - if err := p.writeField3(oprot); err != nil { - return err - } - if err := p.writeField4(oprot); err != nil { - return err - } - if err := p.writeField5(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *UpdateAppEnvRequest) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("app_name", thrift.STRING, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:app_name: ", p), err) - } - if err := oprot.WriteString(string(p.AppName)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.app_name (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:app_name: ", p), err) - } - return err -} - -func (p *UpdateAppEnvRequest) writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("op", thrift.I32, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:op: ", p), err) - } - if err := oprot.WriteI32(int32(p.Op)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.op (2) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:op: ", p), err) - } - return err -} - -func (p *UpdateAppEnvRequest) writeField3(oprot thrift.TProtocol) (err error) { - if p.IsSetKeys() { - if err := oprot.WriteFieldBegin("keys", thrift.LIST, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:keys: ", p), err) - } - if err := oprot.WriteListBegin(thrift.STRING, len(p.Keys)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } 
- for _, v := range p.Keys { - if err := oprot.WriteString(string(v)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err) - } - } - if err := oprot.WriteListEnd(); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:keys: ", p), err) - } - } - return err -} - -func (p *UpdateAppEnvRequest) writeField4(oprot thrift.TProtocol) (err error) { - if p.IsSetValues() { - if err := oprot.WriteFieldBegin("values", thrift.LIST, 4); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:values: ", p), err) - } - if err := oprot.WriteListBegin(thrift.STRING, len(p.Values)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.Values { - if err := oprot.WriteString(string(v)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err) - } - } - if err := oprot.WriteListEnd(); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 4:values: ", p), err) - } - } - return err -} - -func (p *UpdateAppEnvRequest) writeField5(oprot thrift.TProtocol) (err error) { - if p.IsSetClearPrefix() { - if err := oprot.WriteFieldBegin("clear_prefix", thrift.STRING, 5); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:clear_prefix: ", p), err) - } - if err := oprot.WriteString(string(*p.ClearPrefix)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.clear_prefix (5) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 5:clear_prefix: ", p), err) - } - } - return err -} - -func (p *UpdateAppEnvRequest) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("UpdateAppEnvRequest(%+v)", *p) -} - -// Attributes: -// - Err -// - HintMessage -type UpdateAppEnvResponse struct { - Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` - HintMessage string `thrift:"hint_message,2" db:"hint_message" json:"hint_message"` -} - -func NewUpdateAppEnvResponse() *UpdateAppEnvResponse { - return &UpdateAppEnvResponse{} -} - -var UpdateAppEnvResponse_Err_DEFAULT *base.ErrorCode - -func (p *UpdateAppEnvResponse) GetErr() *base.ErrorCode { - if !p.IsSetErr() { - return UpdateAppEnvResponse_Err_DEFAULT - } - return p.Err -} - -func (p *UpdateAppEnvResponse) GetHintMessage() string { - return p.HintMessage -} -func (p *UpdateAppEnvResponse) IsSetErr() bool { - return p.Err != nil -} - -func (p *UpdateAppEnvResponse) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField1(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 2: - if fieldTypeId == thrift.STRING { - if err := p.ReadField2(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err 
!= nil { - return err - } - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *UpdateAppEnvResponse) ReadField1(iprot thrift.TProtocol) error { - p.Err = &base.ErrorCode{} - if err := p.Err.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) - } - return nil -} - -func (p *UpdateAppEnvResponse) ReadField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 2: ", err) - } else { - p.HintMessage = v - } - return nil -} - -func (p *UpdateAppEnvResponse) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("update_app_env_response"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *UpdateAppEnvResponse) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) - } - if err := p.Err.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) - } - return err -} - -func (p *UpdateAppEnvResponse) writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("hint_message", thrift.STRING, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:hint_message: ", p), err) - } - if err := oprot.WriteString(string(p.HintMessage)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.hint_message (2) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:hint_message: ", p), err) - } - return err -} - -func (p *UpdateAppEnvResponse) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("UpdateAppEnvResponse(%+v)", *p) -} - -// Attributes: -// - Status -// - Address -type NodeInfo struct { - Status NodeStatus `thrift:"status,1" db:"status" json:"status"` - Address *base.RPCAddress `thrift:"address,2" db:"address" json:"address"` -} - -func NewNodeInfo() *NodeInfo { - return &NodeInfo{ - Status: 0, - } -} - -func (p *NodeInfo) GetStatus() NodeStatus { - return p.Status -} - -var NodeInfo_Address_DEFAULT *base.RPCAddress - -func (p *NodeInfo) GetAddress() *base.RPCAddress { - if !p.IsSetAddress() { - return NodeInfo_Address_DEFAULT - } - return p.Address -} -func (p *NodeInfo) IsSetAddress() bool { - return p.Address != nil -} - -func (p *NodeInfo) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, 
err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.I32 { - if err := p.ReadField1(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 2: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField2(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *NodeInfo) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - temp := NodeStatus(v) - p.Status = temp - } - return nil -} - -func (p *NodeInfo) ReadField2(iprot thrift.TProtocol) error { - p.Address = &base.RPCAddress{} - if err := p.Address.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Address), err) - } - return nil -} - -func (p *NodeInfo) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("node_info"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *NodeInfo) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("status", thrift.I32, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:status: ", p), err) - } - if err := oprot.WriteI32(int32(p.Status)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.status (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:status: ", p), err) - } - return err -} - -func (p *NodeInfo) writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("address", thrift.STRUCT, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:address: ", p), err) - } - if err := p.Address.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Address), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:address: ", p), err) - } - return err -} - -func (p *NodeInfo) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("NodeInfo(%+v)", *p) -} - -// Attributes: -// - Status -type ListNodesRequest struct { - Status NodeStatus `thrift:"status,1" db:"status" json:"status"` -} - -func NewListNodesRequest() *ListNodesRequest { - return &ListNodesRequest{ - Status: 0, - } -} - -func (p *ListNodesRequest) GetStatus() NodeStatus { - return p.Status -} -func (p *ListNodesRequest) Read(iprot thrift.TProtocol) 
error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.I32 { - if err := p.ReadField1(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *ListNodesRequest) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - temp := NodeStatus(v) - p.Status = temp - } - return nil -} - -func (p *ListNodesRequest) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("list_nodes_request"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *ListNodesRequest) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("status", thrift.I32, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:status: ", p), err) - } - if err := oprot.WriteI32(int32(p.Status)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.status (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:status: ", p), err) - } - return err -} - -func (p *ListNodesRequest) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("ListNodesRequest(%+v)", *p) -} - -// Attributes: -// - Err -// - Infos -type ListNodesResponse struct { - Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` - Infos []*NodeInfo `thrift:"infos,2" db:"infos" json:"infos"` -} - -func NewListNodesResponse() *ListNodesResponse { - return &ListNodesResponse{} -} - -var ListNodesResponse_Err_DEFAULT *base.ErrorCode - -func (p *ListNodesResponse) GetErr() *base.ErrorCode { - if !p.IsSetErr() { - return ListNodesResponse_Err_DEFAULT - } - return p.Err -} - -func (p *ListNodesResponse) GetInfos() []*NodeInfo { - return p.Infos -} -func (p *ListNodesResponse) IsSetErr() bool { - return p.Err != nil -} - -func (p *ListNodesResponse) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField1(iprot); err != nil { - return err - } - } else { - if err := 
iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 2: - if fieldTypeId == thrift.LIST { - if err := p.ReadField2(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *ListNodesResponse) ReadField1(iprot thrift.TProtocol) error { - p.Err = &base.ErrorCode{} - if err := p.Err.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) - } - return nil -} - -func (p *ListNodesResponse) ReadField2(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*NodeInfo, 0, size) - p.Infos = tSlice - for i := 0; i < size; i++ { - _elem8 := &NodeInfo{ - Status: 0, - } - if err := _elem8.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem8), err) - } - p.Infos = append(p.Infos, _elem8) - } - if err := iprot.ReadListEnd(); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *ListNodesResponse) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("list_nodes_response"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *ListNodesResponse) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) - } - if err := p.Err.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) - } - return err -} - -func (p *ListNodesResponse) writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("infos", thrift.LIST, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:infos: ", p), err) - } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Infos)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.Infos { - if err := v.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:infos: ", p), err) - } - return err -} - -func (p *ListNodesResponse) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("ListNodesResponse(%+v)", *p) -} - -type 
ClusterInfoRequest struct { -} - -func NewClusterInfoRequest() *ClusterInfoRequest { - return &ClusterInfoRequest{} -} - -func (p *ClusterInfoRequest) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *ClusterInfoRequest) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("cluster_info_request"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *ClusterInfoRequest) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("ClusterInfoRequest(%+v)", *p) -} - -// Attributes: -// - Err -// - Keys -// - Values -type ClusterInfoResponse struct { - Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` - Keys []string `thrift:"keys,2" db:"keys" json:"keys"` - Values []string `thrift:"values,3" db:"values" json:"values"` -} - -func NewClusterInfoResponse() *ClusterInfoResponse { - return &ClusterInfoResponse{} -} - -var ClusterInfoResponse_Err_DEFAULT *base.ErrorCode - -func (p *ClusterInfoResponse) GetErr() *base.ErrorCode { - if !p.IsSetErr() { - return ClusterInfoResponse_Err_DEFAULT - } - return p.Err -} - -func (p *ClusterInfoResponse) GetKeys() []string { - return p.Keys -} - -func (p *ClusterInfoResponse) GetValues() []string { - return p.Values -} -func (p *ClusterInfoResponse) IsSetErr() bool { - return p.Err != nil -} - -func (p *ClusterInfoResponse) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField1(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 2: - if fieldTypeId == thrift.LIST { - if err := p.ReadField2(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 3: - if fieldTypeId == thrift.LIST { - if err := p.ReadField3(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p 
*ClusterInfoResponse) ReadField1(iprot thrift.TProtocol) error { - p.Err = &base.ErrorCode{} - if err := p.Err.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) - } - return nil -} - -func (p *ClusterInfoResponse) ReadField2(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]string, 0, size) - p.Keys = tSlice - for i := 0; i < size; i++ { - var _elem9 string - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 0: ", err) - } else { - _elem9 = v - } - p.Keys = append(p.Keys, _elem9) - } - if err := iprot.ReadListEnd(); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *ClusterInfoResponse) ReadField3(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]string, 0, size) - p.Values = tSlice - for i := 0; i < size; i++ { - var _elem10 string - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 0: ", err) - } else { - _elem10 = v - } - p.Values = append(p.Values, _elem10) - } - if err := iprot.ReadListEnd(); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *ClusterInfoResponse) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("cluster_info_response"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - if err := p.writeField3(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *ClusterInfoResponse) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) - } - if err := p.Err.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) - } - return err -} - -func (p *ClusterInfoResponse) writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("keys", thrift.LIST, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:keys: ", p), err) - } - if err := oprot.WriteListBegin(thrift.STRING, len(p.Keys)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.Keys { - if err := oprot.WriteString(string(v)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T. 
(0) field write error: ", p), err) - } - } - if err := oprot.WriteListEnd(); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:keys: ", p), err) - } - return err -} - -func (p *ClusterInfoResponse) writeField3(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("values", thrift.LIST, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:values: ", p), err) - } - if err := oprot.WriteListBegin(thrift.STRING, len(p.Values)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.Values { - if err := oprot.WriteString(string(v)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err) - } - } - if err := oprot.WriteListEnd(); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:values: ", p), err) - } - return err -} - -func (p *ClusterInfoResponse) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("ClusterInfoResponse(%+v)", *p) -} - -// Attributes: -// - Level -type MetaControlRequest struct { - Level MetaFunctionLevel `thrift:"level,1" db:"level" json:"level"` -} - -func NewMetaControlRequest() *MetaControlRequest { - return &MetaControlRequest{} -} - -func (p *MetaControlRequest) GetLevel() MetaFunctionLevel { - return p.Level -} -func (p *MetaControlRequest) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.I32 { - if err := p.ReadField1(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *MetaControlRequest) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - temp := MetaFunctionLevel(v) - p.Level = temp - } - return nil -} - -func (p *MetaControlRequest) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("meta_control_request"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *MetaControlRequest) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("level", thrift.I32, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field 
begin error 1:level: ", p), err) - } - if err := oprot.WriteI32(int32(p.Level)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.level (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:level: ", p), err) - } - return err -} - -func (p *MetaControlRequest) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("MetaControlRequest(%+v)", *p) -} - -// Attributes: -// - Err -// - OldLevel -type MetaControlResponse struct { - Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` - OldLevel MetaFunctionLevel `thrift:"old_level,2" db:"old_level" json:"old_level"` -} - -func NewMetaControlResponse() *MetaControlResponse { - return &MetaControlResponse{} -} - -var MetaControlResponse_Err_DEFAULT *base.ErrorCode - -func (p *MetaControlResponse) GetErr() *base.ErrorCode { - if !p.IsSetErr() { - return MetaControlResponse_Err_DEFAULT - } - return p.Err -} - -func (p *MetaControlResponse) GetOldLevel() MetaFunctionLevel { - return p.OldLevel -} -func (p *MetaControlResponse) IsSetErr() bool { - return p.Err != nil -} - -func (p *MetaControlResponse) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField1(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 2: - if fieldTypeId == thrift.I32 { - if err := p.ReadField2(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *MetaControlResponse) ReadField1(iprot thrift.TProtocol) error { - p.Err = &base.ErrorCode{} - if err := p.Err.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) - } - return nil -} - -func (p *MetaControlResponse) ReadField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 2: ", err) - } else { - temp := MetaFunctionLevel(v) - p.OldLevel = temp - } - return nil -} - -func (p *MetaControlResponse) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("meta_control_response"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *MetaControlResponse) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); 
err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) - } - if err := p.Err.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) - } - return err -} - -func (p *MetaControlResponse) writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("old_level", thrift.I32, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:old_level: ", p), err) - } - if err := oprot.WriteI32(int32(p.OldLevel)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.old_level (2) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:old_level: ", p), err) - } - return err -} - -func (p *MetaControlResponse) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("MetaControlResponse(%+v)", *p) -} - -// Attributes: -// - AppName -// - RemoteClusterName -// - Freezed -type DuplicationAddRequest struct { - AppName string `thrift:"app_name,1" db:"app_name" json:"app_name"` - RemoteClusterName string `thrift:"remote_cluster_name,2" db:"remote_cluster_name" json:"remote_cluster_name"` - Freezed bool `thrift:"freezed,3" db:"freezed" json:"freezed"` -} - -func NewDuplicationAddRequest() *DuplicationAddRequest { - return &DuplicationAddRequest{} -} - -func (p *DuplicationAddRequest) GetAppName() string { - return p.AppName -} - -func (p *DuplicationAddRequest) GetRemoteClusterName() string { - return p.RemoteClusterName -} - -func (p *DuplicationAddRequest) GetFreezed() bool { - return p.Freezed -} -func (p *DuplicationAddRequest) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.STRING { - if err := p.ReadField1(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 2: - if fieldTypeId == thrift.STRING { - if err := p.ReadField2(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 3: - if fieldTypeId == thrift.BOOL { - if err := p.ReadField3(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *DuplicationAddRequest) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.AppName = v - } - return nil -} - -func (p *DuplicationAddRequest) ReadField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 2: ", err) - } else { - 
p.RemoteClusterName = v - } - return nil -} - -func (p *DuplicationAddRequest) ReadField3(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { - return thrift.PrependError("error reading field 3: ", err) - } else { - p.Freezed = v - } - return nil -} - -func (p *DuplicationAddRequest) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("duplication_add_request"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - if err := p.writeField3(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *DuplicationAddRequest) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("app_name", thrift.STRING, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:app_name: ", p), err) - } - if err := oprot.WriteString(string(p.AppName)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.app_name (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:app_name: ", p), err) - } - return err -} - -func (p *DuplicationAddRequest) writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("remote_cluster_name", thrift.STRING, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:remote_cluster_name: ", p), err) - } - if err := oprot.WriteString(string(p.RemoteClusterName)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.remote_cluster_name (2) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:remote_cluster_name: ", p), err) - } - return err -} - -func (p *DuplicationAddRequest) writeField3(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("freezed", thrift.BOOL, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:freezed: ", p), err) - } - if err := oprot.WriteBool(bool(p.Freezed)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.freezed (3) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:freezed: ", p), err) - } - return err -} - -func (p *DuplicationAddRequest) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("DuplicationAddRequest(%+v)", *p) -} - -// Attributes: -// - Err -// - Appid -// - Dupid -// - Hint -type DuplicationAddResponse struct { - Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` - Appid int32 `thrift:"appid,2" db:"appid" json:"appid"` - Dupid int32 `thrift:"dupid,3" db:"dupid" json:"dupid"` - Hint *string `thrift:"hint,4" db:"hint" json:"hint,omitempty"` -} - -func NewDuplicationAddResponse() *DuplicationAddResponse { - return &DuplicationAddResponse{} -} - -var DuplicationAddResponse_Err_DEFAULT *base.ErrorCode - -func (p *DuplicationAddResponse) GetErr() *base.ErrorCode { - if !p.IsSetErr() { - return DuplicationAddResponse_Err_DEFAULT - } - return p.Err -} - -func (p 
*DuplicationAddResponse) GetAppid() int32 { - return p.Appid -} - -func (p *DuplicationAddResponse) GetDupid() int32 { - return p.Dupid -} - -var DuplicationAddResponse_Hint_DEFAULT string - -func (p *DuplicationAddResponse) GetHint() string { - if !p.IsSetHint() { - return DuplicationAddResponse_Hint_DEFAULT - } - return *p.Hint -} -func (p *DuplicationAddResponse) IsSetErr() bool { - return p.Err != nil -} - -func (p *DuplicationAddResponse) IsSetHint() bool { - return p.Hint != nil -} - -func (p *DuplicationAddResponse) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField1(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 2: - if fieldTypeId == thrift.I32 { - if err := p.ReadField2(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 3: - if fieldTypeId == thrift.I32 { - if err := p.ReadField3(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 4: - if fieldTypeId == thrift.STRING { - if err := p.ReadField4(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *DuplicationAddResponse) ReadField1(iprot thrift.TProtocol) error { - p.Err = &base.ErrorCode{} - if err := p.Err.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) - } - return nil -} - -func (p *DuplicationAddResponse) ReadField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 2: ", err) - } else { - p.Appid = v - } - return nil -} - -func (p *DuplicationAddResponse) ReadField3(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 3: ", err) - } else { - p.Dupid = v - } - return nil -} - -func (p *DuplicationAddResponse) ReadField4(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 4: ", err) - } else { - p.Hint = &v - } - return nil -} - -func (p *DuplicationAddResponse) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("duplication_add_response"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - if err := p.writeField3(oprot); err != nil { - return err - } - if err := p.writeField4(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: 
", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *DuplicationAddResponse) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) - } - if err := p.Err.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) - } - return err -} - -func (p *DuplicationAddResponse) writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("appid", thrift.I32, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:appid: ", p), err) - } - if err := oprot.WriteI32(int32(p.Appid)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.appid (2) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:appid: ", p), err) - } - return err -} - -func (p *DuplicationAddResponse) writeField3(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("dupid", thrift.I32, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:dupid: ", p), err) - } - if err := oprot.WriteI32(int32(p.Dupid)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.dupid (3) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:dupid: ", p), err) - } - return err -} - -func (p *DuplicationAddResponse) writeField4(oprot thrift.TProtocol) (err error) { - if p.IsSetHint() { - if err := oprot.WriteFieldBegin("hint", thrift.STRING, 4); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:hint: ", p), err) - } - if err := oprot.WriteString(string(*p.Hint)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.hint (4) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 4:hint: ", p), err) - } - } - return err -} - -func (p *DuplicationAddResponse) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("DuplicationAddResponse(%+v)", *p) -} - -// Attributes: -// - AppName -// - Dupid -// - Status -// - FailMode -type DuplicationModifyRequest struct { - AppName string `thrift:"app_name,1" db:"app_name" json:"app_name"` - Dupid int32 `thrift:"dupid,2" db:"dupid" json:"dupid"` - Status *DuplicationStatus `thrift:"status,3" db:"status" json:"status,omitempty"` - FailMode *DuplicationFailMode `thrift:"fail_mode,4" db:"fail_mode" json:"fail_mode,omitempty"` -} - -func NewDuplicationModifyRequest() *DuplicationModifyRequest { - return &DuplicationModifyRequest{} -} - -func (p *DuplicationModifyRequest) GetAppName() string { - return p.AppName -} - -func (p *DuplicationModifyRequest) GetDupid() int32 { - return p.Dupid -} - -var DuplicationModifyRequest_Status_DEFAULT DuplicationStatus - -func (p *DuplicationModifyRequest) GetStatus() DuplicationStatus { - if !p.IsSetStatus() { - return DuplicationModifyRequest_Status_DEFAULT - } - return *p.Status -} - -var DuplicationModifyRequest_FailMode_DEFAULT DuplicationFailMode - -func (p *DuplicationModifyRequest) 
GetFailMode() DuplicationFailMode { - if !p.IsSetFailMode() { - return DuplicationModifyRequest_FailMode_DEFAULT - } - return *p.FailMode -} -func (p *DuplicationModifyRequest) IsSetStatus() bool { - return p.Status != nil -} - -func (p *DuplicationModifyRequest) IsSetFailMode() bool { - return p.FailMode != nil -} - -func (p *DuplicationModifyRequest) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.STRING { - if err := p.ReadField1(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 2: - if fieldTypeId == thrift.I32 { - if err := p.ReadField2(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 3: - if fieldTypeId == thrift.I32 { - if err := p.ReadField3(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 4: - if fieldTypeId == thrift.I32 { - if err := p.ReadField4(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *DuplicationModifyRequest) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.AppName = v - } - return nil -} - -func (p *DuplicationModifyRequest) ReadField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 2: ", err) - } else { - p.Dupid = v - } - return nil -} - -func (p *DuplicationModifyRequest) ReadField3(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 3: ", err) - } else { - temp := DuplicationStatus(v) - p.Status = &temp - } - return nil -} - -func (p *DuplicationModifyRequest) ReadField4(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 4: ", err) - } else { - temp := DuplicationFailMode(v) - p.FailMode = &temp - } - return nil -} - -func (p *DuplicationModifyRequest) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("duplication_modify_request"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - if err := p.writeField3(oprot); err != nil { - return err - } - if err := p.writeField4(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop 
error: ", err) - } - return nil -} - -func (p *DuplicationModifyRequest) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("app_name", thrift.STRING, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:app_name: ", p), err) - } - if err := oprot.WriteString(string(p.AppName)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.app_name (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:app_name: ", p), err) - } - return err -} - -func (p *DuplicationModifyRequest) writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("dupid", thrift.I32, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:dupid: ", p), err) - } - if err := oprot.WriteI32(int32(p.Dupid)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.dupid (2) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:dupid: ", p), err) - } - return err -} - -func (p *DuplicationModifyRequest) writeField3(oprot thrift.TProtocol) (err error) { - if p.IsSetStatus() { - if err := oprot.WriteFieldBegin("status", thrift.I32, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:status: ", p), err) - } - if err := oprot.WriteI32(int32(*p.Status)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.status (3) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:status: ", p), err) - } - } - return err -} - -func (p *DuplicationModifyRequest) writeField4(oprot thrift.TProtocol) (err error) { - if p.IsSetFailMode() { - if err := oprot.WriteFieldBegin("fail_mode", thrift.I32, 4); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:fail_mode: ", p), err) - } - if err := oprot.WriteI32(int32(*p.FailMode)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.fail_mode (4) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 4:fail_mode: ", p), err) - } - } - return err -} - -func (p *DuplicationModifyRequest) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("DuplicationModifyRequest(%+v)", *p) -} - -// Attributes: -// - Err -// - Appid -type DuplicationModifyResponse struct { - Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` - Appid int32 `thrift:"appid,2" db:"appid" json:"appid"` -} - -func NewDuplicationModifyResponse() *DuplicationModifyResponse { - return &DuplicationModifyResponse{} -} - -var DuplicationModifyResponse_Err_DEFAULT *base.ErrorCode - -func (p *DuplicationModifyResponse) GetErr() *base.ErrorCode { - if !p.IsSetErr() { - return DuplicationModifyResponse_Err_DEFAULT - } - return p.Err -} - -func (p *DuplicationModifyResponse) GetAppid() int32 { - return p.Appid -} -func (p *DuplicationModifyResponse) IsSetErr() bool { - return p.Err != nil -} - -func (p *DuplicationModifyResponse) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", 
p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField1(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 2: - if fieldTypeId == thrift.I32 { - if err := p.ReadField2(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *DuplicationModifyResponse) ReadField1(iprot thrift.TProtocol) error { - p.Err = &base.ErrorCode{} - if err := p.Err.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) - } - return nil -} - -func (p *DuplicationModifyResponse) ReadField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 2: ", err) - } else { - p.Appid = v - } - return nil -} - -func (p *DuplicationModifyResponse) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("duplication_modify_response"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *DuplicationModifyResponse) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) - } - if err := p.Err.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) - } - return err -} - -func (p *DuplicationModifyResponse) writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("appid", thrift.I32, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:appid: ", p), err) - } - if err := oprot.WriteI32(int32(p.Appid)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.appid (2) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:appid: ", p), err) - } - return err -} - -func (p *DuplicationModifyResponse) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("DuplicationModifyResponse(%+v)", *p) -} - -// Attributes: -// - Dupid -// - Status -// - Remote -// - CreateTs -// - Progress -// - FailMode -type DuplicationEntry struct { - Dupid int32 `thrift:"dupid,1" db:"dupid" json:"dupid"` - Status DuplicationStatus `thrift:"status,2" db:"status" json:"status"` - Remote string `thrift:"remote,3" db:"remote" json:"remote"` - CreateTs int64 `thrift:"create_ts,4" db:"create_ts" json:"create_ts"` - Progress 
map[int32]int64 `thrift:"progress,5" db:"progress" json:"progress,omitempty"` - // unused field # 6 - FailMode *DuplicationFailMode `thrift:"fail_mode,7" db:"fail_mode" json:"fail_mode,omitempty"` -} - -func NewDuplicationEntry() *DuplicationEntry { - return &DuplicationEntry{} -} - -func (p *DuplicationEntry) GetDupid() int32 { - return p.Dupid -} - -func (p *DuplicationEntry) GetStatus() DuplicationStatus { - return p.Status -} - -func (p *DuplicationEntry) GetRemote() string { - return p.Remote -} - -func (p *DuplicationEntry) GetCreateTs() int64 { - return p.CreateTs -} - -var DuplicationEntry_Progress_DEFAULT map[int32]int64 - -func (p *DuplicationEntry) GetProgress() map[int32]int64 { - return p.Progress -} - -var DuplicationEntry_FailMode_DEFAULT DuplicationFailMode - -func (p *DuplicationEntry) GetFailMode() DuplicationFailMode { - if !p.IsSetFailMode() { - return DuplicationEntry_FailMode_DEFAULT - } - return *p.FailMode -} -func (p *DuplicationEntry) IsSetProgress() bool { - return p.Progress != nil -} - -func (p *DuplicationEntry) IsSetFailMode() bool { - return p.FailMode != nil -} - -func (p *DuplicationEntry) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.I32 { - if err := p.ReadField1(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 2: - if fieldTypeId == thrift.I32 { - if err := p.ReadField2(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 3: - if fieldTypeId == thrift.STRING { - if err := p.ReadField3(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 4: - if fieldTypeId == thrift.I64 { - if err := p.ReadField4(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 5: - if fieldTypeId == thrift.MAP { - if err := p.ReadField5(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 7: - if fieldTypeId == thrift.I32 { - if err := p.ReadField7(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *DuplicationEntry) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.Dupid = v - } - return nil -} - -func (p *DuplicationEntry) ReadField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 2: ", err) - } else { - temp := DuplicationStatus(v) - p.Status = temp - } - return nil -} - -func (p *DuplicationEntry) ReadField3(iprot thrift.TProtocol) error { - if v, err := 
iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 3: ", err) - } else { - p.Remote = v - } - return nil -} - -func (p *DuplicationEntry) ReadField4(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return thrift.PrependError("error reading field 4: ", err) - } else { - p.CreateTs = v - } - return nil -} - -func (p *DuplicationEntry) ReadField5(iprot thrift.TProtocol) error { - _, _, size, err := iprot.ReadMapBegin() - if err != nil { - return thrift.PrependError("error reading map begin: ", err) - } - tMap := make(map[int32]int64, size) - p.Progress = tMap - for i := 0; i < size; i++ { - var _key11 int32 - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 0: ", err) - } else { - _key11 = v - } - var _val12 int64 - if v, err := iprot.ReadI64(); err != nil { - return thrift.PrependError("error reading field 0: ", err) - } else { - _val12 = v - } - p.Progress[_key11] = _val12 - } - if err := iprot.ReadMapEnd(); err != nil { - return thrift.PrependError("error reading map end: ", err) - } - return nil -} - -func (p *DuplicationEntry) ReadField7(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 7: ", err) - } else { - temp := DuplicationFailMode(v) - p.FailMode = &temp - } - return nil -} - -func (p *DuplicationEntry) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("duplication_entry"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - if err := p.writeField3(oprot); err != nil { - return err - } - if err := p.writeField4(oprot); err != nil { - return err - } - if err := p.writeField5(oprot); err != nil { - return err - } - if err := p.writeField7(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *DuplicationEntry) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("dupid", thrift.I32, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:dupid: ", p), err) - } - if err := oprot.WriteI32(int32(p.Dupid)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.dupid (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:dupid: ", p), err) - } - return err -} - -func (p *DuplicationEntry) writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("status", thrift.I32, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:status: ", p), err) - } - if err := oprot.WriteI32(int32(p.Status)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.status (2) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:status: ", p), err) - } - return err -} - -func (p *DuplicationEntry) writeField3(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("remote", thrift.STRING, 3); err != nil { - return 
thrift.PrependError(fmt.Sprintf("%T write field begin error 3:remote: ", p), err) - } - if err := oprot.WriteString(string(p.Remote)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.remote (3) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:remote: ", p), err) - } - return err -} - -func (p *DuplicationEntry) writeField4(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("create_ts", thrift.I64, 4); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:create_ts: ", p), err) - } - if err := oprot.WriteI64(int64(p.CreateTs)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.create_ts (4) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 4:create_ts: ", p), err) - } - return err -} - -func (p *DuplicationEntry) writeField5(oprot thrift.TProtocol) (err error) { - if p.IsSetProgress() { - if err := oprot.WriteFieldBegin("progress", thrift.MAP, 5); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:progress: ", p), err) - } - if err := oprot.WriteMapBegin(thrift.I32, thrift.I64, len(p.Progress)); err != nil { - return thrift.PrependError("error writing map begin: ", err) - } - for k, v := range p.Progress { - if err := oprot.WriteI32(int32(k)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err) - } - if err := oprot.WriteI64(int64(v)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err) - } - } - if err := oprot.WriteMapEnd(); err != nil { - return thrift.PrependError("error writing map end: ", err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 5:progress: ", p), err) - } - } - return err -} - -func (p *DuplicationEntry) writeField7(oprot thrift.TProtocol) (err error) { - if p.IsSetFailMode() { - if err := oprot.WriteFieldBegin("fail_mode", thrift.I32, 7); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:fail_mode: ", p), err) - } - if err := oprot.WriteI32(int32(*p.FailMode)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.fail_mode (7) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 7:fail_mode: ", p), err) - } - } - return err -} - -func (p *DuplicationEntry) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("DuplicationEntry(%+v)", *p) -} - -// Attributes: -// - AppName -type DuplicationQueryRequest struct { - AppName string `thrift:"app_name,1" db:"app_name" json:"app_name"` -} - -func NewDuplicationQueryRequest() *DuplicationQueryRequest { - return &DuplicationQueryRequest{} -} - -func (p *DuplicationQueryRequest) GetAppName() string { - return p.AppName -} -func (p *DuplicationQueryRequest) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.STRING { - if err := p.ReadField1(iprot); 
err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *DuplicationQueryRequest) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.AppName = v - } - return nil -} - -func (p *DuplicationQueryRequest) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("duplication_query_request"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *DuplicationQueryRequest) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("app_name", thrift.STRING, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:app_name: ", p), err) - } - if err := oprot.WriteString(string(p.AppName)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.app_name (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:app_name: ", p), err) - } - return err -} - -func (p *DuplicationQueryRequest) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("DuplicationQueryRequest(%+v)", *p) -} - -// Attributes: -// - Err -// - Appid -// - EntryList -type DuplicationQueryResponse struct { - Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` - // unused field # 2 - Appid int32 `thrift:"appid,3" db:"appid" json:"appid"` - EntryList []*DuplicationEntry `thrift:"entry_list,4" db:"entry_list" json:"entry_list"` -} - -func NewDuplicationQueryResponse() *DuplicationQueryResponse { - return &DuplicationQueryResponse{} -} - -var DuplicationQueryResponse_Err_DEFAULT *base.ErrorCode - -func (p *DuplicationQueryResponse) GetErr() *base.ErrorCode { - if !p.IsSetErr() { - return DuplicationQueryResponse_Err_DEFAULT - } - return p.Err -} - -func (p *DuplicationQueryResponse) GetAppid() int32 { - return p.Appid -} - -func (p *DuplicationQueryResponse) GetEntryList() []*DuplicationEntry { - return p.EntryList -} -func (p *DuplicationQueryResponse) IsSetErr() bool { - return p.Err != nil -} - -func (p *DuplicationQueryResponse) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField1(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 3: - if fieldTypeId == thrift.I32 { - if err := p.ReadField3(iprot); 
err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 4: - if fieldTypeId == thrift.LIST { - if err := p.ReadField4(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *DuplicationQueryResponse) ReadField1(iprot thrift.TProtocol) error { - p.Err = &base.ErrorCode{} - if err := p.Err.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) - } - return nil -} - -func (p *DuplicationQueryResponse) ReadField3(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 3: ", err) - } else { - p.Appid = v - } - return nil -} - -func (p *DuplicationQueryResponse) ReadField4(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*DuplicationEntry, 0, size) - p.EntryList = tSlice - for i := 0; i < size; i++ { - _elem13 := &DuplicationEntry{} - if err := _elem13.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem13), err) - } - p.EntryList = append(p.EntryList, _elem13) - } - if err := iprot.ReadListEnd(); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *DuplicationQueryResponse) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("duplication_query_response"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField3(oprot); err != nil { - return err - } - if err := p.writeField4(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *DuplicationQueryResponse) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) - } - if err := p.Err.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) - } - return err -} - -func (p *DuplicationQueryResponse) writeField3(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("appid", thrift.I32, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:appid: ", p), err) - } - if err := oprot.WriteI32(int32(p.Appid)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.appid (3) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:appid: ", p), err) - } - return err -} - -func (p 
*DuplicationQueryResponse) writeField4(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("entry_list", thrift.LIST, 4); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:entry_list: ", p), err) - } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.EntryList)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.EntryList { - if err := v.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 4:entry_list: ", p), err) - } - return err -} - -func (p *DuplicationQueryResponse) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("DuplicationQueryResponse(%+v)", *p) -} - -// Attributes: -// - PolicyName -// - BackupProviderType -// - BackupIntervalSeconds -// - AppIds -// - BackupHistoryCountToKeep -// - StartTime -// - IsDisable -type PolicyEntry struct { - PolicyName string `thrift:"policy_name,1" db:"policy_name" json:"policy_name"` - BackupProviderType string `thrift:"backup_provider_type,2" db:"backup_provider_type" json:"backup_provider_type"` - BackupIntervalSeconds string `thrift:"backup_interval_seconds,3" db:"backup_interval_seconds" json:"backup_interval_seconds"` - AppIds []int32 `thrift:"app_ids,4" db:"app_ids" json:"app_ids"` - BackupHistoryCountToKeep int32 `thrift:"backup_history_count_to_keep,5" db:"backup_history_count_to_keep" json:"backup_history_count_to_keep"` - StartTime string `thrift:"start_time,6" db:"start_time" json:"start_time"` - IsDisable bool `thrift:"is_disable,7" db:"is_disable" json:"is_disable"` -} - -func NewPolicyEntry() *PolicyEntry { - return &PolicyEntry{} -} - -func (p *PolicyEntry) GetPolicyName() string { - return p.PolicyName -} - -func (p *PolicyEntry) GetBackupProviderType() string { - return p.BackupProviderType -} - -func (p *PolicyEntry) GetBackupIntervalSeconds() string { - return p.BackupIntervalSeconds -} - -func (p *PolicyEntry) GetAppIds() []int32 { - return p.AppIds -} - -func (p *PolicyEntry) GetBackupHistoryCountToKeep() int32 { - return p.BackupHistoryCountToKeep -} - -func (p *PolicyEntry) GetStartTime() string { - return p.StartTime -} - -func (p *PolicyEntry) GetIsDisable() bool { - return p.IsDisable -} -func (p *PolicyEntry) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.STRING { - if err := p.ReadField1(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 2: - if fieldTypeId == thrift.STRING { - if err := p.ReadField2(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 3: - if fieldTypeId == thrift.STRING { - if err := p.ReadField3(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 4: - if fieldTypeId == thrift.SET 
{ - if err := p.ReadField4(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 5: - if fieldTypeId == thrift.I32 { - if err := p.ReadField5(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 6: - if fieldTypeId == thrift.STRING { - if err := p.ReadField6(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 7: - if fieldTypeId == thrift.BOOL { - if err := p.ReadField7(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *PolicyEntry) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.PolicyName = v - } - return nil -} - -func (p *PolicyEntry) ReadField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 2: ", err) - } else { - p.BackupProviderType = v - } - return nil -} - -func (p *PolicyEntry) ReadField3(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 3: ", err) - } else { - p.BackupIntervalSeconds = v - } - return nil -} - -func (p *PolicyEntry) ReadField4(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadSetBegin() - if err != nil { - return thrift.PrependError("error reading set begin: ", err) - } - tSet := make([]int32, 0, size) - p.AppIds = tSet - for i := 0; i < size; i++ { - var _elem14 int32 - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 0: ", err) - } else { - _elem14 = v - } - p.AppIds = append(p.AppIds, _elem14) - } - if err := iprot.ReadSetEnd(); err != nil { - return thrift.PrependError("error reading set end: ", err) - } - return nil -} - -func (p *PolicyEntry) ReadField5(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 5: ", err) - } else { - p.BackupHistoryCountToKeep = v - } - return nil -} - -func (p *PolicyEntry) ReadField6(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 6: ", err) - } else { - p.StartTime = v - } - return nil -} - -func (p *PolicyEntry) ReadField7(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { - return thrift.PrependError("error reading field 7: ", err) - } else { - p.IsDisable = v - } - return nil -} - -func (p *PolicyEntry) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("policy_entry"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - if err := p.writeField3(oprot); err != nil { - return err - } - if err := p.writeField4(oprot); err != nil { - return err - } - if err := p.writeField5(oprot); err != nil { - return err - } - if 
err := p.writeField6(oprot); err != nil { - return err - } - if err := p.writeField7(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *PolicyEntry) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("policy_name", thrift.STRING, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:policy_name: ", p), err) - } - if err := oprot.WriteString(string(p.PolicyName)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.policy_name (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:policy_name: ", p), err) - } - return err -} - -func (p *PolicyEntry) writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("backup_provider_type", thrift.STRING, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:backup_provider_type: ", p), err) - } - if err := oprot.WriteString(string(p.BackupProviderType)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.backup_provider_type (2) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:backup_provider_type: ", p), err) - } - return err -} - -func (p *PolicyEntry) writeField3(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("backup_interval_seconds", thrift.STRING, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:backup_interval_seconds: ", p), err) - } - if err := oprot.WriteString(string(p.BackupIntervalSeconds)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.backup_interval_seconds (3) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:backup_interval_seconds: ", p), err) - } - return err -} - -func (p *PolicyEntry) writeField4(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("app_ids", thrift.SET, 4); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:app_ids: ", p), err) - } - if err := oprot.WriteSetBegin(thrift.I32, len(p.AppIds)); err != nil { - return thrift.PrependError("error writing set begin: ", err) - } - for i := 0; i < len(p.AppIds); i++ { - for j := i + 1; j < len(p.AppIds); j++ { - if reflect.DeepEqual(p.AppIds[i], p.AppIds[j]) { - return thrift.PrependError("", fmt.Errorf("%T error writing set field: slice is not unique", p.AppIds[i])) - } - } - } - for _, v := range p.AppIds { - if err := oprot.WriteI32(int32(v)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T. 
(0) field write error: ", p), err) - } - } - if err := oprot.WriteSetEnd(); err != nil { - return thrift.PrependError("error writing set end: ", err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 4:app_ids: ", p), err) - } - return err -} - -func (p *PolicyEntry) writeField5(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("backup_history_count_to_keep", thrift.I32, 5); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:backup_history_count_to_keep: ", p), err) - } - if err := oprot.WriteI32(int32(p.BackupHistoryCountToKeep)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.backup_history_count_to_keep (5) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 5:backup_history_count_to_keep: ", p), err) - } - return err -} - -func (p *PolicyEntry) writeField6(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("start_time", thrift.STRING, 6); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:start_time: ", p), err) - } - if err := oprot.WriteString(string(p.StartTime)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.start_time (6) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 6:start_time: ", p), err) - } - return err -} - -func (p *PolicyEntry) writeField7(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("is_disable", thrift.BOOL, 7); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:is_disable: ", p), err) - } - if err := oprot.WriteBool(bool(p.IsDisable)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.is_disable (7) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 7:is_disable: ", p), err) - } - return err -} - -func (p *PolicyEntry) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("PolicyEntry(%+v)", *p) -} - -// Attributes: -// - BackupID -// - StartTimeMs -// - EndTimeMs -// - AppIds -type BackupEntry struct { - BackupID int64 `thrift:"backup_id,1" db:"backup_id" json:"backup_id"` - StartTimeMs int64 `thrift:"start_time_ms,2" db:"start_time_ms" json:"start_time_ms"` - EndTimeMs int64 `thrift:"end_time_ms,3" db:"end_time_ms" json:"end_time_ms"` - AppIds []int32 `thrift:"app_ids,4" db:"app_ids" json:"app_ids"` -} - -func NewBackupEntry() *BackupEntry { - return &BackupEntry{} -} - -func (p *BackupEntry) GetBackupID() int64 { - return p.BackupID -} - -func (p *BackupEntry) GetStartTimeMs() int64 { - return p.StartTimeMs -} - -func (p *BackupEntry) GetEndTimeMs() int64 { - return p.EndTimeMs -} - -func (p *BackupEntry) GetAppIds() []int32 { - return p.AppIds -} -func (p *BackupEntry) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.I64 { - if err := p.ReadField1(iprot); err != nil { - return err - } - } else { - if err 
:= iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 2: - if fieldTypeId == thrift.I64 { - if err := p.ReadField2(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 3: - if fieldTypeId == thrift.I64 { - if err := p.ReadField3(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 4: - if fieldTypeId == thrift.SET { - if err := p.ReadField4(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *BackupEntry) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.BackupID = v - } - return nil -} - -func (p *BackupEntry) ReadField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return thrift.PrependError("error reading field 2: ", err) - } else { - p.StartTimeMs = v - } - return nil -} - -func (p *BackupEntry) ReadField3(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return thrift.PrependError("error reading field 3: ", err) - } else { - p.EndTimeMs = v - } - return nil -} - -func (p *BackupEntry) ReadField4(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadSetBegin() - if err != nil { - return thrift.PrependError("error reading set begin: ", err) - } - tSet := make([]int32, 0, size) - p.AppIds = tSet - for i := 0; i < size; i++ { - var _elem15 int32 - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 0: ", err) - } else { - _elem15 = v - } - p.AppIds = append(p.AppIds, _elem15) - } - if err := iprot.ReadSetEnd(); err != nil { - return thrift.PrependError("error reading set end: ", err) - } - return nil -} - -func (p *BackupEntry) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("backup_entry"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - if err := p.writeField3(oprot); err != nil { - return err - } - if err := p.writeField4(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *BackupEntry) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("backup_id", thrift.I64, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:backup_id: ", p), err) - } - if err := oprot.WriteI64(int64(p.BackupID)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.backup_id (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:backup_id: ", p), err) - } - return err -} - -func (p *BackupEntry) writeField2(oprot thrift.TProtocol) 
(err error) { - if err := oprot.WriteFieldBegin("start_time_ms", thrift.I64, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:start_time_ms: ", p), err) - } - if err := oprot.WriteI64(int64(p.StartTimeMs)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.start_time_ms (2) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:start_time_ms: ", p), err) - } - return err -} - -func (p *BackupEntry) writeField3(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("end_time_ms", thrift.I64, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:end_time_ms: ", p), err) - } - if err := oprot.WriteI64(int64(p.EndTimeMs)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.end_time_ms (3) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:end_time_ms: ", p), err) - } - return err -} - -func (p *BackupEntry) writeField4(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("app_ids", thrift.SET, 4); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:app_ids: ", p), err) - } - if err := oprot.WriteSetBegin(thrift.I32, len(p.AppIds)); err != nil { - return thrift.PrependError("error writing set begin: ", err) - } - for i := 0; i < len(p.AppIds); i++ { - for j := i + 1; j < len(p.AppIds); j++ { - if reflect.DeepEqual(p.AppIds[i], p.AppIds[j]) { - return thrift.PrependError("", fmt.Errorf("%T error writing set field: slice is not unique", p.AppIds[i])) - } - } - } - for _, v := range p.AppIds { - if err := oprot.WriteI32(int32(v)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T. 
(0) field write error: ", p), err) - } - } - if err := oprot.WriteSetEnd(); err != nil { - return thrift.PrependError("error writing set end: ", err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 4:app_ids: ", p), err) - } - return err -} - -func (p *BackupEntry) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("BackupEntry(%+v)", *p) -} - -// Attributes: -// - PolicyNames -// - BackupInfoCount -type QueryBackupPolicyRequest struct { - PolicyNames []string `thrift:"policy_names,1" db:"policy_names" json:"policy_names"` - BackupInfoCount int32 `thrift:"backup_info_count,2" db:"backup_info_count" json:"backup_info_count"` -} - -func NewQueryBackupPolicyRequest() *QueryBackupPolicyRequest { - return &QueryBackupPolicyRequest{} -} - -func (p *QueryBackupPolicyRequest) GetPolicyNames() []string { - return p.PolicyNames -} - -func (p *QueryBackupPolicyRequest) GetBackupInfoCount() int32 { - return p.BackupInfoCount -} -func (p *QueryBackupPolicyRequest) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.LIST { - if err := p.ReadField1(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 2: - if fieldTypeId == thrift.I32 { - if err := p.ReadField2(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *QueryBackupPolicyRequest) ReadField1(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]string, 0, size) - p.PolicyNames = tSlice - for i := 0; i < size; i++ { - var _elem16 string - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 0: ", err) - } else { - _elem16 = v - } - p.PolicyNames = append(p.PolicyNames, _elem16) - } - if err := iprot.ReadListEnd(); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *QueryBackupPolicyRequest) ReadField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 2: ", err) - } else { - p.BackupInfoCount = v - } - return nil -} - -func (p *QueryBackupPolicyRequest) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("query_backup_policy_request"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := 
oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *QueryBackupPolicyRequest) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("policy_names", thrift.LIST, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:policy_names: ", p), err) - } - if err := oprot.WriteListBegin(thrift.STRING, len(p.PolicyNames)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.PolicyNames { - if err := oprot.WriteString(string(v)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err) - } - } - if err := oprot.WriteListEnd(); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:policy_names: ", p), err) - } - return err -} - -func (p *QueryBackupPolicyRequest) writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("backup_info_count", thrift.I32, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:backup_info_count: ", p), err) - } - if err := oprot.WriteI32(int32(p.BackupInfoCount)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.backup_info_count (2) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:backup_info_count: ", p), err) - } - return err -} - -func (p *QueryBackupPolicyRequest) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("QueryBackupPolicyRequest(%+v)", *p) -} - -// Attributes: -// - Err -// - Policys -// - BackupInfos -// - HintMsg -type QueryBackupPolicyResponse struct { - Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` - Policys []*PolicyEntry `thrift:"policys,2" db:"policys" json:"policys"` - BackupInfos [][]*BackupEntry `thrift:"backup_infos,3" db:"backup_infos" json:"backup_infos"` - HintMsg *string `thrift:"hint_msg,4" db:"hint_msg" json:"hint_msg,omitempty"` -} - -func NewQueryBackupPolicyResponse() *QueryBackupPolicyResponse { - return &QueryBackupPolicyResponse{} -} - -var QueryBackupPolicyResponse_Err_DEFAULT *base.ErrorCode - -func (p *QueryBackupPolicyResponse) GetErr() *base.ErrorCode { - if !p.IsSetErr() { - return QueryBackupPolicyResponse_Err_DEFAULT - } - return p.Err -} - -func (p *QueryBackupPolicyResponse) GetPolicys() []*PolicyEntry { - return p.Policys -} - -func (p *QueryBackupPolicyResponse) GetBackupInfos() [][]*BackupEntry { - return p.BackupInfos -} - -var QueryBackupPolicyResponse_HintMsg_DEFAULT string - -func (p *QueryBackupPolicyResponse) GetHintMsg() string { - if !p.IsSetHintMsg() { - return QueryBackupPolicyResponse_HintMsg_DEFAULT - } - return *p.HintMsg -} -func (p *QueryBackupPolicyResponse) IsSetErr() bool { - return p.Err != nil -} - -func (p *QueryBackupPolicyResponse) IsSetHintMsg() bool { - return p.HintMsg != nil -} - -func (p *QueryBackupPolicyResponse) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if 
fieldTypeId == thrift.STRUCT { - if err := p.ReadField1(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 2: - if fieldTypeId == thrift.LIST { - if err := p.ReadField2(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 3: - if fieldTypeId == thrift.LIST { - if err := p.ReadField3(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 4: - if fieldTypeId == thrift.STRING { - if err := p.ReadField4(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *QueryBackupPolicyResponse) ReadField1(iprot thrift.TProtocol) error { - p.Err = &base.ErrorCode{} - if err := p.Err.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) - } - return nil -} - -func (p *QueryBackupPolicyResponse) ReadField2(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*PolicyEntry, 0, size) - p.Policys = tSlice - for i := 0; i < size; i++ { - _elem17 := &PolicyEntry{} - if err := _elem17.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem17), err) - } - p.Policys = append(p.Policys, _elem17) - } - if err := iprot.ReadListEnd(); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *QueryBackupPolicyResponse) ReadField3(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([][]*BackupEntry, 0, size) - p.BackupInfos = tSlice - for i := 0; i < size; i++ { - _, size, err := iprot.ReadListBegin() - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*BackupEntry, 0, size) - _elem18 := tSlice - for i := 0; i < size; i++ { - _elem19 := &BackupEntry{} - if err := _elem19.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem19), err) - } - _elem18 = append(_elem18, _elem19) - } - if err := iprot.ReadListEnd(); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - p.BackupInfos = append(p.BackupInfos, _elem18) - } - if err := iprot.ReadListEnd(); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *QueryBackupPolicyResponse) ReadField4(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 4: ", err) - } else { - p.HintMsg = &v - } - return nil -} - -func (p *QueryBackupPolicyResponse) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("query_backup_policy_response"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(oprot); err != nil { - return err - } - if err := 
p.writeField2(oprot); err != nil { - return err - } - if err := p.writeField3(oprot); err != nil { - return err - } - if err := p.writeField4(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *QueryBackupPolicyResponse) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) - } - if err := p.Err.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) - } - return err -} - -func (p *QueryBackupPolicyResponse) writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("policys", thrift.LIST, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:policys: ", p), err) - } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Policys)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.Policys { - if err := v.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:policys: ", p), err) - } - return err -} - -func (p *QueryBackupPolicyResponse) writeField3(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("backup_infos", thrift.LIST, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:backup_infos: ", p), err) - } - if err := oprot.WriteListBegin(thrift.LIST, len(p.BackupInfos)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.BackupInfos { - if err := oprot.WriteListBegin(thrift.STRUCT, len(v)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range v { - if err := v.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - } - if err := oprot.WriteListEnd(); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:backup_infos: ", p), err) - } - return err -} - -func (p *QueryBackupPolicyResponse) writeField4(oprot thrift.TProtocol) (err error) { - if p.IsSetHintMsg() { - if err := oprot.WriteFieldBegin("hint_msg", thrift.STRING, 4); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:hint_msg: ", p), err) - } - if err := oprot.WriteString(string(*p.HintMsg)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.hint_msg (4) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 4:hint_msg: ", p), err) - } - } - return err -} - 
-func (p *QueryBackupPolicyResponse) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("QueryBackupPolicyResponse(%+v)", *p) -} - -// Attributes: -// - Target -// - Node -// - Type -type ConfigurationProposalAction struct { - Target *base.RPCAddress `thrift:"target,1" db:"target" json:"target"` - Node *base.RPCAddress `thrift:"node,2" db:"node" json:"node"` - Type ConfigType `thrift:"type,3" db:"type" json:"type"` -} - -func NewConfigurationProposalAction() *ConfigurationProposalAction { - return &ConfigurationProposalAction{} -} - -var ConfigurationProposalAction_Target_DEFAULT *base.RPCAddress - -func (p *ConfigurationProposalAction) GetTarget() *base.RPCAddress { - if !p.IsSetTarget() { - return ConfigurationProposalAction_Target_DEFAULT - } - return p.Target -} - -var ConfigurationProposalAction_Node_DEFAULT *base.RPCAddress - -func (p *ConfigurationProposalAction) GetNode() *base.RPCAddress { - if !p.IsSetNode() { - return ConfigurationProposalAction_Node_DEFAULT - } - return p.Node -} - -func (p *ConfigurationProposalAction) GetType() ConfigType { - return p.Type -} -func (p *ConfigurationProposalAction) IsSetTarget() bool { - return p.Target != nil -} - -func (p *ConfigurationProposalAction) IsSetNode() bool { - return p.Node != nil -} - -func (p *ConfigurationProposalAction) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField1(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 2: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField2(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 3: - if fieldTypeId == thrift.I32 { - if err := p.ReadField3(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *ConfigurationProposalAction) ReadField1(iprot thrift.TProtocol) error { - p.Target = &base.RPCAddress{} - if err := p.Target.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Target), err) - } - return nil -} - -func (p *ConfigurationProposalAction) ReadField2(iprot thrift.TProtocol) error { - p.Node = &base.RPCAddress{} - if err := p.Node.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Node), err) - } - return nil -} - -func (p *ConfigurationProposalAction) ReadField3(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 3: ", err) - } else { - temp := ConfigType(v) - p.Type = temp - } - return nil -} - -func (p *ConfigurationProposalAction) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("configuration_proposal_action"); err != nil { - 
return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - if err := p.writeField3(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *ConfigurationProposalAction) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("target", thrift.STRUCT, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:target: ", p), err) - } - if err := p.Target.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Target), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:target: ", p), err) - } - return err -} - -func (p *ConfigurationProposalAction) writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("node", thrift.STRUCT, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:node: ", p), err) - } - if err := p.Node.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Node), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:node: ", p), err) - } - return err -} - -func (p *ConfigurationProposalAction) writeField3(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("type", thrift.I32, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:type: ", p), err) - } - if err := oprot.WriteI32(int32(p.Type)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.type (3) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:type: ", p), err) - } - return err -} - -func (p *ConfigurationProposalAction) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("ConfigurationProposalAction(%+v)", *p) -} - -// Attributes: -// - Gpid -// - ActionList -// - Force -// - BalanceType -type BalanceRequest struct { - Gpid *base.Gpid `thrift:"gpid,1" db:"gpid" json:"gpid"` - ActionList []*ConfigurationProposalAction `thrift:"action_list,2" db:"action_list" json:"action_list"` - Force bool `thrift:"force,3" db:"force" json:"force"` - BalanceType *BalancerRequestType `thrift:"balance_type,4" db:"balance_type" json:"balance_type,omitempty"` -} - -func NewBalanceRequest() *BalanceRequest { - return &BalanceRequest{} -} - -var BalanceRequest_Gpid_DEFAULT *base.Gpid - -func (p *BalanceRequest) GetGpid() *base.Gpid { - if !p.IsSetGpid() { - return BalanceRequest_Gpid_DEFAULT - } - return p.Gpid -} - -func (p *BalanceRequest) GetActionList() []*ConfigurationProposalAction { - return p.ActionList -} - -var BalanceRequest_Force_DEFAULT bool = false - -func (p *BalanceRequest) GetForce() bool { - return p.Force -} - -var BalanceRequest_BalanceType_DEFAULT BalancerRequestType - -func (p *BalanceRequest) GetBalanceType() BalancerRequestType { - if !p.IsSetBalanceType() { - return BalanceRequest_BalanceType_DEFAULT - } - return *p.BalanceType -} -func (p *BalanceRequest) IsSetGpid() bool { - return 
p.Gpid != nil -} - -func (p *BalanceRequest) IsSetForce() bool { - return p.Force != BalanceRequest_Force_DEFAULT -} - -func (p *BalanceRequest) IsSetBalanceType() bool { - return p.BalanceType != nil -} - -func (p *BalanceRequest) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField1(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 2: - if fieldTypeId == thrift.LIST { - if err := p.ReadField2(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 3: - if fieldTypeId == thrift.BOOL { - if err := p.ReadField3(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 4: - if fieldTypeId == thrift.I32 { - if err := p.ReadField4(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *BalanceRequest) ReadField1(iprot thrift.TProtocol) error { - p.Gpid = &base.Gpid{} - if err := p.Gpid.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Gpid), err) - } - return nil -} - -func (p *BalanceRequest) ReadField2(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*ConfigurationProposalAction, 0, size) - p.ActionList = tSlice - for i := 0; i < size; i++ { - _elem20 := &ConfigurationProposalAction{} - if err := _elem20.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem20), err) - } - p.ActionList = append(p.ActionList, _elem20) - } - if err := iprot.ReadListEnd(); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *BalanceRequest) ReadField3(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { - return thrift.PrependError("error reading field 3: ", err) - } else { - p.Force = v - } - return nil -} - -func (p *BalanceRequest) ReadField4(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 4: ", err) - } else { - temp := BalancerRequestType(v) - p.BalanceType = &temp - } - return nil -} - -func (p *BalanceRequest) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("balance_request"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - if err := p.writeField3(oprot); err != nil { - return err - } - if err := p.writeField4(oprot); 
err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *BalanceRequest) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("gpid", thrift.STRUCT, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:gpid: ", p), err) - } - if err := p.Gpid.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Gpid), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:gpid: ", p), err) - } - return err -} - -func (p *BalanceRequest) writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("action_list", thrift.LIST, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:action_list: ", p), err) - } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.ActionList)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.ActionList { - if err := v.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:action_list: ", p), err) - } - return err -} - -func (p *BalanceRequest) writeField3(oprot thrift.TProtocol) (err error) { - if p.IsSetForce() { - if err := oprot.WriteFieldBegin("force", thrift.BOOL, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:force: ", p), err) - } - if err := oprot.WriteBool(bool(p.Force)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.force (3) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:force: ", p), err) - } - } - return err -} - -func (p *BalanceRequest) writeField4(oprot thrift.TProtocol) (err error) { - if p.IsSetBalanceType() { - if err := oprot.WriteFieldBegin("balance_type", thrift.I32, 4); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:balance_type: ", p), err) - } - if err := oprot.WriteI32(int32(*p.BalanceType)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.balance_type (4) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 4:balance_type: ", p), err) - } - } - return err -} - -func (p *BalanceRequest) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("BalanceRequest(%+v)", *p) -} - -// Attributes: -// - Err -type BalanceResponse struct { - Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` -} - -func NewBalanceResponse() *BalanceResponse { - return &BalanceResponse{} -} - -var BalanceResponse_Err_DEFAULT *base.ErrorCode - -func (p *BalanceResponse) GetErr() *base.ErrorCode { - if !p.IsSetErr() { - return BalanceResponse_Err_DEFAULT - } - return p.Err -} -func (p *BalanceResponse) IsSetErr() bool { - return p.Err != nil -} - -func (p *BalanceResponse) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - 
-        return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
-    }
-
-    for {
-        _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
-        if err != nil {
-            return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
-        }
-        if fieldTypeId == thrift.STOP {
-            break
-        }
-        switch fieldId {
-        case 1:
-            if fieldTypeId == thrift.STRUCT {
-                if err := p.ReadField1(iprot); err != nil {
-                    return err
-                }
-            } else {
-                if err := iprot.Skip(fieldTypeId); err != nil {
-                    return err
-                }
-            }
-        default:
-            if err := iprot.Skip(fieldTypeId); err != nil {
-                return err
-            }
-        }
-        if err := iprot.ReadFieldEnd(); err != nil {
-            return err
-        }
-    }
-    if err := iprot.ReadStructEnd(); err != nil {
-        return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
-    }
-    return nil
-}
-
-func (p *BalanceResponse) ReadField1(iprot thrift.TProtocol) error {
-    p.Err = &base.ErrorCode{}
-    if err := p.Err.Read(iprot); err != nil {
-        return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err)
-    }
-    return nil
-}
-
-func (p *BalanceResponse) Write(oprot thrift.TProtocol) error {
-    if err := oprot.WriteStructBegin("balance_response"); err != nil {
-        return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
-    }
-    if p != nil {
-        if err := p.writeField1(oprot); err != nil {
-            return err
-        }
-    }
-    if err := oprot.WriteFieldStop(); err != nil {
-        return thrift.PrependError("write field stop error: ", err)
-    }
-    if err := oprot.WriteStructEnd(); err != nil {
-        return thrift.PrependError("write struct stop error: ", err)
-    }
-    return nil
-}
-
-func (p *BalanceResponse) writeField1(oprot thrift.TProtocol) (err error) {
-    if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil {
-        return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err)
-    }
-    if err := p.Err.Write(oprot); err != nil {
-        return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err)
-    }
-    if err := oprot.WriteFieldEnd(); err != nil {
-        return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err)
-    }
-    return err
-}
-
-func (p *BalanceResponse) String() string {
-    if p == nil {
-        return ""
-    }
-    return fmt.Sprintf("BalanceResponse(%+v)", *p)
-}
-
-type AdminClient interface {
-    // Parameters:
-    //  - Req
-    CreateApp(ctx context.Context, req *CreateAppRequest) (r *CreateAppResponse, err error)
-    // Parameters:
-    //  - Req
-    DropApp(ctx context.Context, req *DropAppRequest) (r *DropAppResponse, err error)
-    // Parameters:
-    //  - Req
-    RecallApp(ctx context.Context, req *RecallAppRequest) (r *RecallAppResponse, err error)
-    // Parameters:
-    //  - Req
-    ListApps(ctx context.Context, req *ListAppsRequest) (r *ListAppsResponse, err error)
-    // Parameters:
-    //  - Req
-    AddDuplication(ctx context.Context, req *DuplicationAddRequest) (r *DuplicationAddResponse, err error)
-    // Parameters:
-    //  - Req
-    QueryDuplication(ctx context.Context, req *DuplicationQueryRequest) (r *DuplicationQueryResponse, err error)
-    // Parameters:
-    //  - Req
-    ModifyDuplication(ctx context.Context, req *DuplicationModifyRequest) (r *DuplicationModifyResponse, err error)
-    // Parameters:
-    //  - Req
-    QueryAppInfo(ctx context.Context, req *QueryAppInfoRequest) (r *QueryAppInfoResponse, err error)
-    // Parameters:
-    //  - Req
-    UpdateAppEnv(ctx context.Context, req *UpdateAppEnvRequest) (r *UpdateAppEnvResponse, err error)
-    // Parameters:
-    //  - Req
-    ListNodes(ctx context.Context, req *ListNodesRequest) (r *ListNodesResponse, err error)
-    // Parameters:
-    //  - Req
-    QueryClusterInfo(ctx context.Context, req *ClusterInfoRequest) (r *ClusterInfoResponse, err error)
-    // Parameters:
-    //  - Req
-    MetaControl(ctx context.Context, req *MetaControlRequest) (r *MetaControlResponse, err error)
-    // Parameters:
-    //  - Req
-    QueryBackupPolicy(ctx context.Context, req *QueryBackupPolicyRequest) (r *QueryBackupPolicyResponse, err error)
-    // Parameters:
-    //  - Req
-    Balance(ctx context.Context, req *BalanceRequest) (r *BalanceResponse, err error)
-}
-
-type AdminClientClient struct {
-    c thrift.TClient
-}
-
-func NewAdminClientClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *AdminClientClient {
-    return &AdminClientClient{
-        c: thrift.NewTStandardClient(f.GetProtocol(t), f.GetProtocol(t)),
-    }
-}
-
-func NewAdminClientClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *AdminClientClient {
-    return &AdminClientClient{
-        c: thrift.NewTStandardClient(iprot, oprot),
-    }
-}
-
-func NewAdminClientClient(c thrift.TClient) *AdminClientClient {
-    return &AdminClientClient{
-        c: c,
-    }
-}
-
-func (p *AdminClientClient) Client_() thrift.TClient {
-    return p.c
-}
-
-// Parameters:
-//  - Req
-func (p *AdminClientClient) CreateApp(ctx context.Context, req *CreateAppRequest) (r *CreateAppResponse, err error) {
-    var _args21 AdminClientCreateAppArgs
-    _args21.Req = req
-    var _result22 AdminClientCreateAppResult
-    if err = p.Client_().Call(ctx, "create_app", &_args21, &_result22); err != nil {
-        return
-    }
-    return _result22.GetSuccess(), nil
-}
-
-// Parameters:
-//  - Req
-func (p *AdminClientClient) DropApp(ctx context.Context, req *DropAppRequest) (r *DropAppResponse, err error) {
-    var _args23 AdminClientDropAppArgs
-    _args23.Req = req
-    var _result24 AdminClientDropAppResult
-    if err = p.Client_().Call(ctx, "drop_app", &_args23, &_result24); err != nil {
-        return
-    }
-    return _result24.GetSuccess(), nil
-}
-
-// Parameters:
-//  - Req
-func (p *AdminClientClient) RecallApp(ctx context.Context, req *RecallAppRequest) (r *RecallAppResponse, err error) {
-    var _args25 AdminClientRecallAppArgs
-    _args25.Req = req
-    var _result26 AdminClientRecallAppResult
-    if err = p.Client_().Call(ctx, "recall_app", &_args25, &_result26); err != nil {
-        return
-    }
-    return _result26.GetSuccess(), nil
-}
-
-// Parameters:
-//  - Req
-func (p *AdminClientClient) ListApps(ctx context.Context, req *ListAppsRequest) (r *ListAppsResponse, err error) {
-    var _args27 AdminClientListAppsArgs
-    _args27.Req = req
-    var _result28 AdminClientListAppsResult
-    if err = p.Client_().Call(ctx, "list_apps", &_args27, &_result28); err != nil {
-        return
-    }
-    return _result28.GetSuccess(), nil
-}
-
-// Parameters:
-//  - Req
-func (p *AdminClientClient) AddDuplication(ctx context.Context, req *DuplicationAddRequest) (r *DuplicationAddResponse, err error) {
-    var _args29 AdminClientAddDuplicationArgs
-    _args29.Req = req
-    var _result30 AdminClientAddDuplicationResult
-    if err = p.Client_().Call(ctx, "add_duplication", &_args29, &_result30); err != nil {
-        return
-    }
-    return _result30.GetSuccess(), nil
-}
-
-// Parameters:
-//  - Req
-func (p *AdminClientClient) QueryDuplication(ctx context.Context, req *DuplicationQueryRequest) (r *DuplicationQueryResponse, err error) {
-    var _args31 AdminClientQueryDuplicationArgs
-    _args31.Req = req
-    var _result32 AdminClientQueryDuplicationResult
-    if err = p.Client_().Call(ctx, "query_duplication", &_args31, &_result32); err != nil {
-        return
-    }
-    return _result32.GetSuccess(), nil
-}
-
-// Parameters:
-//  -
Req -func (p *AdminClientClient) ModifyDuplication(ctx context.Context, req *DuplicationModifyRequest) (r *DuplicationModifyResponse, err error) { - var _args33 AdminClientModifyDuplicationArgs - _args33.Req = req - var _result34 AdminClientModifyDuplicationResult - if err = p.Client_().Call(ctx, "modify_duplication", &_args33, &_result34); err != nil { - return - } - return _result34.GetSuccess(), nil -} - -// Parameters: -// - Req -func (p *AdminClientClient) QueryAppInfo(ctx context.Context, req *QueryAppInfoRequest) (r *QueryAppInfoResponse, err error) { - var _args35 AdminClientQueryAppInfoArgs - _args35.Req = req - var _result36 AdminClientQueryAppInfoResult - if err = p.Client_().Call(ctx, "query_app_info", &_args35, &_result36); err != nil { - return - } - return _result36.GetSuccess(), nil -} - -// Parameters: -// - Req -func (p *AdminClientClient) UpdateAppEnv(ctx context.Context, req *UpdateAppEnvRequest) (r *UpdateAppEnvResponse, err error) { - var _args37 AdminClientUpdateAppEnvArgs - _args37.Req = req - var _result38 AdminClientUpdateAppEnvResult - if err = p.Client_().Call(ctx, "update_app_env", &_args37, &_result38); err != nil { - return - } - return _result38.GetSuccess(), nil -} - -// Parameters: -// - Req -func (p *AdminClientClient) ListNodes(ctx context.Context, req *ListNodesRequest) (r *ListNodesResponse, err error) { - var _args39 AdminClientListNodesArgs - _args39.Req = req - var _result40 AdminClientListNodesResult - if err = p.Client_().Call(ctx, "list_nodes", &_args39, &_result40); err != nil { - return - } - return _result40.GetSuccess(), nil -} - -// Parameters: -// - Req -func (p *AdminClientClient) QueryClusterInfo(ctx context.Context, req *ClusterInfoRequest) (r *ClusterInfoResponse, err error) { - var _args41 AdminClientQueryClusterInfoArgs - _args41.Req = req - var _result42 AdminClientQueryClusterInfoResult - if err = p.Client_().Call(ctx, "query_cluster_info", &_args41, &_result42); err != nil { - return - } - return _result42.GetSuccess(), nil -} - -// Parameters: -// - Req -func (p *AdminClientClient) MetaControl(ctx context.Context, req *MetaControlRequest) (r *MetaControlResponse, err error) { - var _args43 AdminClientMetaControlArgs - _args43.Req = req - var _result44 AdminClientMetaControlResult - if err = p.Client_().Call(ctx, "meta_control", &_args43, &_result44); err != nil { - return - } - return _result44.GetSuccess(), nil -} - -// Parameters: -// - Req -func (p *AdminClientClient) QueryBackupPolicy(ctx context.Context, req *QueryBackupPolicyRequest) (r *QueryBackupPolicyResponse, err error) { - var _args45 AdminClientQueryBackupPolicyArgs - _args45.Req = req - var _result46 AdminClientQueryBackupPolicyResult - if err = p.Client_().Call(ctx, "query_backup_policy", &_args45, &_result46); err != nil { - return - } - return _result46.GetSuccess(), nil -} - -// Parameters: -// - Req -func (p *AdminClientClient) Balance(ctx context.Context, req *BalanceRequest) (r *BalanceResponse, err error) { - var _args47 AdminClientBalanceArgs - _args47.Req = req - var _result48 AdminClientBalanceResult - if err = p.Client_().Call(ctx, "balance", &_args47, &_result48); err != nil { - return - } - return _result48.GetSuccess(), nil -} - -type AdminClientProcessor struct { - processorMap map[string]thrift.TProcessorFunction - handler AdminClient -} - -func (p *AdminClientProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) { - p.processorMap[key] = processor -} - -func (p *AdminClientProcessor) GetProcessorFunction(key string) 
(processor thrift.TProcessorFunction, ok bool) { - processor, ok = p.processorMap[key] - return processor, ok -} - -func (p *AdminClientProcessor) ProcessorMap() map[string]thrift.TProcessorFunction { - return p.processorMap -} - -func NewAdminClientProcessor(handler AdminClient) *AdminClientProcessor { - - self49 := &AdminClientProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)} - self49.processorMap["create_app"] = &adminClientProcessorCreateApp{handler: handler} - self49.processorMap["drop_app"] = &adminClientProcessorDropApp{handler: handler} - self49.processorMap["recall_app"] = &adminClientProcessorRecallApp{handler: handler} - self49.processorMap["list_apps"] = &adminClientProcessorListApps{handler: handler} - self49.processorMap["add_duplication"] = &adminClientProcessorAddDuplication{handler: handler} - self49.processorMap["query_duplication"] = &adminClientProcessorQueryDuplication{handler: handler} - self49.processorMap["modify_duplication"] = &adminClientProcessorModifyDuplication{handler: handler} - self49.processorMap["query_app_info"] = &adminClientProcessorQueryAppInfo{handler: handler} - self49.processorMap["update_app_env"] = &adminClientProcessorUpdateAppEnv{handler: handler} - self49.processorMap["list_nodes"] = &adminClientProcessorListNodes{handler: handler} - self49.processorMap["query_cluster_info"] = &adminClientProcessorQueryClusterInfo{handler: handler} - self49.processorMap["meta_control"] = &adminClientProcessorMetaControl{handler: handler} - self49.processorMap["query_backup_policy"] = &adminClientProcessorQueryBackupPolicy{handler: handler} - self49.processorMap["balance"] = &adminClientProcessorBalance{handler: handler} - return self49 -} - -func (p *AdminClientProcessor) Process(ctx context.Context, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - name, _, seqId, err := iprot.ReadMessageBegin() - if err != nil { - return false, err - } - if processor, ok := p.GetProcessorFunction(name); ok { - return processor.Process(ctx, seqId, iprot, oprot) - } - iprot.Skip(thrift.STRUCT) - iprot.ReadMessageEnd() - x50 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name) - oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId) - x50.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return false, x50 - -} - -type adminClientProcessorCreateApp struct { - handler AdminClient -} - -func (p *adminClientProcessorCreateApp) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := AdminClientCreateAppArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("create_app", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return false, err - } - - iprot.ReadMessageEnd() - result := AdminClientCreateAppResult{} - var retval *CreateAppResponse - var err2 error - if retval, err2 = p.handler.CreateApp(ctx, args.Req); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing create_app: "+err2.Error()) - oprot.WriteMessageBegin("create_app", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return true, err2 - } else { - result.Success = retval - } - if err2 = oprot.WriteMessageBegin("create_app", thrift.REPLY, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == 
nil && err2 != nil { - err = err2 - } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = err2 - } - if err != nil { - return - } - return true, err -} - -type adminClientProcessorDropApp struct { - handler AdminClient -} - -func (p *adminClientProcessorDropApp) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := AdminClientDropAppArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("drop_app", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return false, err - } - - iprot.ReadMessageEnd() - result := AdminClientDropAppResult{} - var retval *DropAppResponse - var err2 error - if retval, err2 = p.handler.DropApp(ctx, args.Req); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing drop_app: "+err2.Error()) - oprot.WriteMessageBegin("drop_app", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return true, err2 - } else { - result.Success = retval - } - if err2 = oprot.WriteMessageBegin("drop_app", thrift.REPLY, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = err2 - } - if err != nil { - return - } - return true, err -} - -type adminClientProcessorRecallApp struct { - handler AdminClient -} - -func (p *adminClientProcessorRecallApp) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := AdminClientRecallAppArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("recall_app", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return false, err - } - - iprot.ReadMessageEnd() - result := AdminClientRecallAppResult{} - var retval *RecallAppResponse - var err2 error - if retval, err2 = p.handler.RecallApp(ctx, args.Req); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing recall_app: "+err2.Error()) - oprot.WriteMessageBegin("recall_app", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return true, err2 - } else { - result.Success = retval - } - if err2 = oprot.WriteMessageBegin("recall_app", thrift.REPLY, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = err2 - } - if err != nil { - return - } - return true, err -} - -type adminClientProcessorListApps struct { - handler AdminClient -} - -func (p *adminClientProcessorListApps) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := AdminClientListAppsArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("list_apps", 
thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return false, err - } - - iprot.ReadMessageEnd() - result := AdminClientListAppsResult{} - var retval *ListAppsResponse - var err2 error - if retval, err2 = p.handler.ListApps(ctx, args.Req); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing list_apps: "+err2.Error()) - oprot.WriteMessageBegin("list_apps", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return true, err2 - } else { - result.Success = retval - } - if err2 = oprot.WriteMessageBegin("list_apps", thrift.REPLY, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = err2 - } - if err != nil { - return - } - return true, err -} - -type adminClientProcessorAddDuplication struct { - handler AdminClient -} - -func (p *adminClientProcessorAddDuplication) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := AdminClientAddDuplicationArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("add_duplication", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return false, err - } - - iprot.ReadMessageEnd() - result := AdminClientAddDuplicationResult{} - var retval *DuplicationAddResponse - var err2 error - if retval, err2 = p.handler.AddDuplication(ctx, args.Req); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing add_duplication: "+err2.Error()) - oprot.WriteMessageBegin("add_duplication", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return true, err2 - } else { - result.Success = retval - } - if err2 = oprot.WriteMessageBegin("add_duplication", thrift.REPLY, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = err2 - } - if err != nil { - return - } - return true, err -} - -type adminClientProcessorQueryDuplication struct { - handler AdminClient -} - -func (p *adminClientProcessorQueryDuplication) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := AdminClientQueryDuplicationArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("query_duplication", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return false, err - } - - iprot.ReadMessageEnd() - result := AdminClientQueryDuplicationResult{} - var retval *DuplicationQueryResponse - var err2 error - if retval, err2 = p.handler.QueryDuplication(ctx, args.Req); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing query_duplication: "+err2.Error()) - oprot.WriteMessageBegin("query_duplication", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return true, err2 - } 
else { - result.Success = retval - } - if err2 = oprot.WriteMessageBegin("query_duplication", thrift.REPLY, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = err2 - } - if err != nil { - return - } - return true, err -} - -type adminClientProcessorModifyDuplication struct { - handler AdminClient -} - -func (p *adminClientProcessorModifyDuplication) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := AdminClientModifyDuplicationArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("modify_duplication", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return false, err - } - - iprot.ReadMessageEnd() - result := AdminClientModifyDuplicationResult{} - var retval *DuplicationModifyResponse - var err2 error - if retval, err2 = p.handler.ModifyDuplication(ctx, args.Req); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing modify_duplication: "+err2.Error()) - oprot.WriteMessageBegin("modify_duplication", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return true, err2 - } else { - result.Success = retval - } - if err2 = oprot.WriteMessageBegin("modify_duplication", thrift.REPLY, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = err2 - } - if err != nil { - return - } - return true, err -} - -type adminClientProcessorQueryAppInfo struct { - handler AdminClient -} - -func (p *adminClientProcessorQueryAppInfo) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := AdminClientQueryAppInfoArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("query_app_info", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return false, err - } - - iprot.ReadMessageEnd() - result := AdminClientQueryAppInfoResult{} - var retval *QueryAppInfoResponse - var err2 error - if retval, err2 = p.handler.QueryAppInfo(ctx, args.Req); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing query_app_info: "+err2.Error()) - oprot.WriteMessageBegin("query_app_info", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return true, err2 - } else { - result.Success = retval - } - if err2 = oprot.WriteMessageBegin("query_app_info", thrift.REPLY, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = err2 - } - if err != nil { - return - } - return true, err -} - -type adminClientProcessorUpdateAppEnv struct { - handler AdminClient -} - -func (p *adminClientProcessorUpdateAppEnv) 
Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := AdminClientUpdateAppEnvArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("update_app_env", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return false, err - } - - iprot.ReadMessageEnd() - result := AdminClientUpdateAppEnvResult{} - var retval *UpdateAppEnvResponse - var err2 error - if retval, err2 = p.handler.UpdateAppEnv(ctx, args.Req); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing update_app_env: "+err2.Error()) - oprot.WriteMessageBegin("update_app_env", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return true, err2 - } else { - result.Success = retval - } - if err2 = oprot.WriteMessageBegin("update_app_env", thrift.REPLY, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = err2 - } - if err != nil { - return - } - return true, err -} - -type adminClientProcessorListNodes struct { - handler AdminClient -} - -func (p *adminClientProcessorListNodes) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := AdminClientListNodesArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("list_nodes", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return false, err - } - - iprot.ReadMessageEnd() - result := AdminClientListNodesResult{} - var retval *ListNodesResponse - var err2 error - if retval, err2 = p.handler.ListNodes(ctx, args.Req); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing list_nodes: "+err2.Error()) - oprot.WriteMessageBegin("list_nodes", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return true, err2 - } else { - result.Success = retval - } - if err2 = oprot.WriteMessageBegin("list_nodes", thrift.REPLY, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = err2 - } - if err != nil { - return - } - return true, err -} - -type adminClientProcessorQueryClusterInfo struct { - handler AdminClient -} - -func (p *adminClientProcessorQueryClusterInfo) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := AdminClientQueryClusterInfoArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("query_cluster_info", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return false, err - } - - iprot.ReadMessageEnd() - result := AdminClientQueryClusterInfoResult{} - var retval *ClusterInfoResponse - var err2 error - if retval, err2 = 
p.handler.QueryClusterInfo(ctx, args.Req); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing query_cluster_info: "+err2.Error()) - oprot.WriteMessageBegin("query_cluster_info", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return true, err2 - } else { - result.Success = retval - } - if err2 = oprot.WriteMessageBegin("query_cluster_info", thrift.REPLY, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = err2 - } - if err != nil { - return - } - return true, err -} - -type adminClientProcessorMetaControl struct { - handler AdminClient -} - -func (p *adminClientProcessorMetaControl) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := AdminClientMetaControlArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("meta_control", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return false, err - } - - iprot.ReadMessageEnd() - result := AdminClientMetaControlResult{} - var retval *MetaControlResponse - var err2 error - if retval, err2 = p.handler.MetaControl(ctx, args.Req); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing meta_control: "+err2.Error()) - oprot.WriteMessageBegin("meta_control", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return true, err2 - } else { - result.Success = retval - } - if err2 = oprot.WriteMessageBegin("meta_control", thrift.REPLY, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = err2 - } - if err != nil { - return - } - return true, err -} - -type adminClientProcessorQueryBackupPolicy struct { - handler AdminClient -} - -func (p *adminClientProcessorQueryBackupPolicy) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := AdminClientQueryBackupPolicyArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("query_backup_policy", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return false, err - } - - iprot.ReadMessageEnd() - result := AdminClientQueryBackupPolicyResult{} - var retval *QueryBackupPolicyResponse - var err2 error - if retval, err2 = p.handler.QueryBackupPolicy(ctx, args.Req); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing query_backup_policy: "+err2.Error()) - oprot.WriteMessageBegin("query_backup_policy", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return true, err2 - } else { - result.Success = retval - } - if err2 = oprot.WriteMessageBegin("query_backup_policy", thrift.REPLY, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 - } - 
[Deleted Thrift-generated AdminClient code: the adminClientProcessorBalance processor, the "HELPER FUNCTIONS AND STRUCTURES" section, and the generated Args/Result struct pairs (constructors, Get/IsSet accessors, Read, ReadFieldN, Write, writeFieldN and String methods) for create_app, drop_app, recall_app, list_apps, add_duplication, query_duplication, modify_duplication, query_app_info, update_app_env, list_nodes, query_cluster_info and the start of meta_control; the deleted generated code continues below.]
{ - case 1: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField1(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *AdminClientMetaControlArgs) ReadField1(iprot thrift.TProtocol) error { - p.Req = &MetaControlRequest{} - if err := p.Req.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err) - } - return nil -} - -func (p *AdminClientMetaControlArgs) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("meta_control_args"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *AdminClientMetaControlArgs) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err) - } - if err := p.Req.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Req), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:req: ", p), err) - } - return err -} - -func (p *AdminClientMetaControlArgs) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("AdminClientMetaControlArgs(%+v)", *p) -} - -// Attributes: -// - Success -type AdminClientMetaControlResult struct { - Success *MetaControlResponse `thrift:"success,0" db:"success" json:"success,omitempty"` -} - -func NewAdminClientMetaControlResult() *AdminClientMetaControlResult { - return &AdminClientMetaControlResult{} -} - -var AdminClientMetaControlResult_Success_DEFAULT *MetaControlResponse - -func (p *AdminClientMetaControlResult) GetSuccess() *MetaControlResponse { - if !p.IsSetSuccess() { - return AdminClientMetaControlResult_Success_DEFAULT - } - return p.Success -} -func (p *AdminClientMetaControlResult) IsSetSuccess() bool { - return p.Success != nil -} - -func (p *AdminClientMetaControlResult) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 0: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField0(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", 
p), err) - } - return nil -} - -func (p *AdminClientMetaControlResult) ReadField0(iprot thrift.TProtocol) error { - p.Success = &MetaControlResponse{} - if err := p.Success.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) - } - return nil -} - -func (p *AdminClientMetaControlResult) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("meta_control_result"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField0(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *AdminClientMetaControlResult) writeField0(oprot thrift.TProtocol) (err error) { - if p.IsSetSuccess() { - if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) - } - if err := p.Success.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) - } - } - return err -} - -func (p *AdminClientMetaControlResult) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("AdminClientMetaControlResult(%+v)", *p) -} - -// Attributes: -// - Req -type AdminClientQueryBackupPolicyArgs struct { - Req *QueryBackupPolicyRequest `thrift:"req,1" db:"req" json:"req"` -} - -func NewAdminClientQueryBackupPolicyArgs() *AdminClientQueryBackupPolicyArgs { - return &AdminClientQueryBackupPolicyArgs{} -} - -var AdminClientQueryBackupPolicyArgs_Req_DEFAULT *QueryBackupPolicyRequest - -func (p *AdminClientQueryBackupPolicyArgs) GetReq() *QueryBackupPolicyRequest { - if !p.IsSetReq() { - return AdminClientQueryBackupPolicyArgs_Req_DEFAULT - } - return p.Req -} -func (p *AdminClientQueryBackupPolicyArgs) IsSetReq() bool { - return p.Req != nil -} - -func (p *AdminClientQueryBackupPolicyArgs) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField1(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *AdminClientQueryBackupPolicyArgs) ReadField1(iprot thrift.TProtocol) error { - p.Req = &QueryBackupPolicyRequest{} - if err := p.Req.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err) - } - return nil -} - -func (p *AdminClientQueryBackupPolicyArgs) Write(oprot thrift.TProtocol) error { - if err 
:= oprot.WriteStructBegin("query_backup_policy_args"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *AdminClientQueryBackupPolicyArgs) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err) - } - if err := p.Req.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Req), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:req: ", p), err) - } - return err -} - -func (p *AdminClientQueryBackupPolicyArgs) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("AdminClientQueryBackupPolicyArgs(%+v)", *p) -} - -// Attributes: -// - Success -type AdminClientQueryBackupPolicyResult struct { - Success *QueryBackupPolicyResponse `thrift:"success,0" db:"success" json:"success,omitempty"` -} - -func NewAdminClientQueryBackupPolicyResult() *AdminClientQueryBackupPolicyResult { - return &AdminClientQueryBackupPolicyResult{} -} - -var AdminClientQueryBackupPolicyResult_Success_DEFAULT *QueryBackupPolicyResponse - -func (p *AdminClientQueryBackupPolicyResult) GetSuccess() *QueryBackupPolicyResponse { - if !p.IsSetSuccess() { - return AdminClientQueryBackupPolicyResult_Success_DEFAULT - } - return p.Success -} -func (p *AdminClientQueryBackupPolicyResult) IsSetSuccess() bool { - return p.Success != nil -} - -func (p *AdminClientQueryBackupPolicyResult) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 0: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField0(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *AdminClientQueryBackupPolicyResult) ReadField0(iprot thrift.TProtocol) error { - p.Success = &QueryBackupPolicyResponse{} - if err := p.Success.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) - } - return nil -} - -func (p *AdminClientQueryBackupPolicyResult) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("query_backup_policy_result"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField0(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field 
stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *AdminClientQueryBackupPolicyResult) writeField0(oprot thrift.TProtocol) (err error) { - if p.IsSetSuccess() { - if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) - } - if err := p.Success.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) - } - } - return err -} - -func (p *AdminClientQueryBackupPolicyResult) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("AdminClientQueryBackupPolicyResult(%+v)", *p) -} - -// Attributes: -// - Req -type AdminClientBalanceArgs struct { - Req *BalanceRequest `thrift:"req,1" db:"req" json:"req"` -} - -func NewAdminClientBalanceArgs() *AdminClientBalanceArgs { - return &AdminClientBalanceArgs{} -} - -var AdminClientBalanceArgs_Req_DEFAULT *BalanceRequest - -func (p *AdminClientBalanceArgs) GetReq() *BalanceRequest { - if !p.IsSetReq() { - return AdminClientBalanceArgs_Req_DEFAULT - } - return p.Req -} -func (p *AdminClientBalanceArgs) IsSetReq() bool { - return p.Req != nil -} - -func (p *AdminClientBalanceArgs) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField1(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *AdminClientBalanceArgs) ReadField1(iprot thrift.TProtocol) error { - p.Req = &BalanceRequest{} - if err := p.Req.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err) - } - return nil -} - -func (p *AdminClientBalanceArgs) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("balance_args"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *AdminClientBalanceArgs) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err) - } - if err := p.Req.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Req), err) 
- } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:req: ", p), err) - } - return err -} - -func (p *AdminClientBalanceArgs) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("AdminClientBalanceArgs(%+v)", *p) -} - -// Attributes: -// - Success -type AdminClientBalanceResult struct { - Success *BalanceResponse `thrift:"success,0" db:"success" json:"success,omitempty"` -} - -func NewAdminClientBalanceResult() *AdminClientBalanceResult { - return &AdminClientBalanceResult{} -} - -var AdminClientBalanceResult_Success_DEFAULT *BalanceResponse - -func (p *AdminClientBalanceResult) GetSuccess() *BalanceResponse { - if !p.IsSetSuccess() { - return AdminClientBalanceResult_Success_DEFAULT - } - return p.Success -} -func (p *AdminClientBalanceResult) IsSetSuccess() bool { - return p.Success != nil -} - -func (p *AdminClientBalanceResult) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 0: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField0(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *AdminClientBalanceResult) ReadField0(iprot thrift.TProtocol) error { - p.Success = &BalanceResponse{} - if err := p.Success.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) - } - return nil -} - -func (p *AdminClientBalanceResult) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("balance_result"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField0(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *AdminClientBalanceResult) writeField0(oprot thrift.TProtocol) (err error) { - if p.IsSetSuccess() { - if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) - } - if err := p.Success.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) - } - } - return err -} - -func (p *AdminClientBalanceResult) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("AdminClientBalanceResult(%+v)", *p) -} diff --git a/vendor/github.com/XiaoMi/pegasus-go-client/idl/base/GoUnusedProtection__.go 
b/vendor/github.com/XiaoMi/pegasus-go-client/idl/base/GoUnusedProtection__.go deleted file mode 100644 index e62b98f..0000000 --- a/vendor/github.com/XiaoMi/pegasus-go-client/idl/base/GoUnusedProtection__.go +++ /dev/null @@ -1,6 +0,0 @@ -// Autogenerated by Thrift Compiler (0.11.0) -// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - -package base - -var GoUnusedProtection__ int diff --git a/vendor/github.com/XiaoMi/pegasus-go-client/idl/base/blob.go b/vendor/github.com/XiaoMi/pegasus-go-client/idl/base/blob.go deleted file mode 100644 index 4d9ee78..0000000 --- a/vendor/github.com/XiaoMi/pegasus-go-client/idl/base/blob.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright (c) 2017, Xiaomi, Inc. All rights reserved. -// This source code is licensed under the Apache License Version 2.0, which -// can be found in the LICENSE file in the root directory of this source tree. - -package base - -import ( - "fmt" - - "github.com/pegasus-kv/thrift/lib/go/thrift" -) - -type Blob struct { - Data []byte -} - -func (b *Blob) Read(iprot thrift.TProtocol) error { - data, err := iprot.ReadBinary() - if err != nil { - return err - } - b.Data = data - return nil -} - -func (b *Blob) Write(oprot thrift.TProtocol) error { - return oprot.WriteBinary(b.Data) -} - -func (b *Blob) String() string { - if b == nil { - return "" - } - return fmt.Sprintf("Blob(%+v)", *b) -} - -func NewBlob() *Blob { - return &Blob{} -} diff --git a/vendor/github.com/XiaoMi/pegasus-go-client/idl/base/dsn_err_string.go b/vendor/github.com/XiaoMi/pegasus-go-client/idl/base/dsn_err_string.go deleted file mode 100644 index a64542a..0000000 --- a/vendor/github.com/XiaoMi/pegasus-go-client/idl/base/dsn_err_string.go +++ /dev/null @@ -1,110 +0,0 @@ -// Code generated by "enumer -type=DsnErrCode -output=dsn_err_string.go"; DO NOT EDIT. 
- -// -package base - -import ( - "fmt" -) - -const _DsnErrCodeName = "ERR_OKERR_UNKNOWNERR_REPLICATION_FAILUREERR_APP_EXISTERR_APP_NOT_EXISTERR_APP_DROPPEDERR_BUSY_CREATINGERR_BUSY_DROPPINGERR_EXPIREDERR_LOCK_ALREADY_EXISTERR_HOLD_BY_OTHERSERR_RECURSIVE_LOCKERR_NO_OWNERERR_NODE_ALREADY_EXISTERR_INCONSISTENT_STATEERR_ARRAY_INDEX_OUT_OF_RANGEERR_SERVICE_NOT_FOUNDERR_SERVICE_ALREADY_RUNNINGERR_IO_PENDINGERR_TIMEOUTERR_SERVICE_NOT_ACTIVEERR_BUSYERR_NETWORK_INIT_FAILEDERR_FORWARD_TO_OTHERSERR_OBJECT_NOT_FOUNDERR_HANDLER_NOT_FOUNDERR_LEARN_FILE_FAILEDERR_GET_LEARN_STATE_FAILEDERR_INVALID_VERSIONERR_INVALID_PARAMETERSERR_CAPACITY_EXCEEDEDERR_INVALID_STATEERR_INACTIVE_STATEERR_NOT_ENOUGH_MEMBERERR_FILE_OPERATION_FAILEDERR_HANDLE_EOFERR_WRONG_CHECKSUMERR_INVALID_DATAERR_INVALID_HANDLEERR_INCOMPLETE_DATAERR_VERSION_OUTDATEDERR_PATH_NOT_FOUNDERR_PATH_ALREADY_EXISTERR_ADDRESS_ALREADY_USEDERR_STATE_FREEZEDERR_LOCAL_APP_FAILUREERR_BIND_IOCP_FAILEDERR_NETWORK_START_FAILEDERR_NOT_IMPLEMENTEDERR_CHECKPOINT_FAILEDERR_WRONG_TIMINGERR_NO_NEED_OPERATEERR_CORRUPTIONERR_TRY_AGAINERR_CLUSTER_NOT_FOUNDERR_CLUSTER_ALREADY_EXISTERR_SERVICE_ALREADY_EXISTERR_INJECTEDERR_NETWORK_FAILUREERR_UNDER_RECOVERYERR_OPERATION_DISABLEDERR_ZOOKEEPER_OPERATION" - -var _DsnErrCodeIndex = [...]uint16{0, 6, 17, 40, 53, 70, 85, 102, 119, 130, 152, 170, 188, 200, 222, 244, 272, 293, 320, 334, 345, 367, 375, 398, 419, 439, 460, 481, 507, 526, 548, 569, 586, 604, 625, 650, 664, 682, 698, 716, 735, 755, 773, 795, 819, 836, 857, 877, 901, 920, 941, 957, 976, 990, 1003, 1024, 1049, 1074, 1086, 1105, 1123, 1145, 1168} - -func (i DsnErrCode) String() string { - if i < 0 || i >= DsnErrCode(len(_DsnErrCodeIndex)-1) { - return fmt.Sprintf("DsnErrCode(%d)", i) - } - return _DsnErrCodeName[_DsnErrCodeIndex[i]:_DsnErrCodeIndex[i+1]] -} - -var _DsnErrCodeValues = []DsnErrCode{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61} - -var _DsnErrCodeNameToValueMap = map[string]DsnErrCode{ - _DsnErrCodeName[0:6]: 0, - _DsnErrCodeName[6:17]: 1, - _DsnErrCodeName[17:40]: 2, - _DsnErrCodeName[40:53]: 3, - _DsnErrCodeName[53:70]: 4, - _DsnErrCodeName[70:85]: 5, - _DsnErrCodeName[85:102]: 6, - _DsnErrCodeName[102:119]: 7, - _DsnErrCodeName[119:130]: 8, - _DsnErrCodeName[130:152]: 9, - _DsnErrCodeName[152:170]: 10, - _DsnErrCodeName[170:188]: 11, - _DsnErrCodeName[188:200]: 12, - _DsnErrCodeName[200:222]: 13, - _DsnErrCodeName[222:244]: 14, - _DsnErrCodeName[244:272]: 15, - _DsnErrCodeName[272:293]: 16, - _DsnErrCodeName[293:320]: 17, - _DsnErrCodeName[320:334]: 18, - _DsnErrCodeName[334:345]: 19, - _DsnErrCodeName[345:367]: 20, - _DsnErrCodeName[367:375]: 21, - _DsnErrCodeName[375:398]: 22, - _DsnErrCodeName[398:419]: 23, - _DsnErrCodeName[419:439]: 24, - _DsnErrCodeName[439:460]: 25, - _DsnErrCodeName[460:481]: 26, - _DsnErrCodeName[481:507]: 27, - _DsnErrCodeName[507:526]: 28, - _DsnErrCodeName[526:548]: 29, - _DsnErrCodeName[548:569]: 30, - _DsnErrCodeName[569:586]: 31, - _DsnErrCodeName[586:604]: 32, - _DsnErrCodeName[604:625]: 33, - _DsnErrCodeName[625:650]: 34, - _DsnErrCodeName[650:664]: 35, - _DsnErrCodeName[664:682]: 36, - _DsnErrCodeName[682:698]: 37, - _DsnErrCodeName[698:716]: 38, - _DsnErrCodeName[716:735]: 39, - _DsnErrCodeName[735:755]: 40, - _DsnErrCodeName[755:773]: 41, - _DsnErrCodeName[773:795]: 42, - _DsnErrCodeName[795:819]: 43, - _DsnErrCodeName[819:836]: 
44, - _DsnErrCodeName[836:857]: 45, - _DsnErrCodeName[857:877]: 46, - _DsnErrCodeName[877:901]: 47, - _DsnErrCodeName[901:920]: 48, - _DsnErrCodeName[920:941]: 49, - _DsnErrCodeName[941:957]: 50, - _DsnErrCodeName[957:976]: 51, - _DsnErrCodeName[976:990]: 52, - _DsnErrCodeName[990:1003]: 53, - _DsnErrCodeName[1003:1024]: 54, - _DsnErrCodeName[1024:1049]: 55, - _DsnErrCodeName[1049:1074]: 56, - _DsnErrCodeName[1074:1086]: 57, - _DsnErrCodeName[1086:1105]: 58, - _DsnErrCodeName[1105:1123]: 59, - _DsnErrCodeName[1123:1145]: 60, - _DsnErrCodeName[1145:1168]: 61, -} - -// DsnErrCodeString retrieves an enum value from the enum constants string name. -// Throws an error if the param is not part of the enum. -func DsnErrCodeString(s string) (DsnErrCode, error) { - if val, ok := _DsnErrCodeNameToValueMap[s]; ok { - return val, nil - } - return 0, fmt.Errorf("%s does not belong to DsnErrCode values", s) -} - -// DsnErrCodeValues returns all values of the enum -func DsnErrCodeValues() []DsnErrCode { - return _DsnErrCodeValues -} - -// IsADsnErrCode returns "true" if the value is listed in the enum definition. "false" otherwise -func (i DsnErrCode) IsADsnErrCode() bool { - for _, v := range _DsnErrCodeValues { - if i == v { - return true - } - } - return false -} diff --git a/vendor/github.com/XiaoMi/pegasus-go-client/idl/base/error_code.go b/vendor/github.com/XiaoMi/pegasus-go-client/idl/base/error_code.go deleted file mode 100644 index 7489496..0000000 --- a/vendor/github.com/XiaoMi/pegasus-go-client/idl/base/error_code.go +++ /dev/null @@ -1,139 +0,0 @@ -package base - -import ( - "fmt" - - "github.com/pegasus-kv/thrift/lib/go/thrift" -) - -/// Primitive for Pegasus thrift framework. -type ErrorCode struct { - Errno string -} - -// How to generate the map from string to error codes? 
-// First: -// - go get github.com/alvaroloes/enumer -// Second: -// - cd idl/base -// - enumer -type=DsnErrCode -output=dsn_err_string.go - -//go:generate enumer -type=DsnErrCode -output=err_type_string.go -type DsnErrCode int32 - -const ( - ERR_OK DsnErrCode = iota - ERR_UNKNOWN - ERR_REPLICATION_FAILURE - ERR_APP_EXIST - ERR_APP_NOT_EXIST - ERR_APP_DROPPED - ERR_BUSY_CREATING - ERR_BUSY_DROPPING - ERR_EXPIRED - ERR_LOCK_ALREADY_EXIST - ERR_HOLD_BY_OTHERS - ERR_RECURSIVE_LOCK - ERR_NO_OWNER - ERR_NODE_ALREADY_EXIST - ERR_INCONSISTENT_STATE - ERR_ARRAY_INDEX_OUT_OF_RANGE - ERR_SERVICE_NOT_FOUND - ERR_SERVICE_ALREADY_RUNNING - ERR_IO_PENDING - ERR_TIMEOUT - ERR_SERVICE_NOT_ACTIVE - ERR_BUSY - ERR_NETWORK_INIT_FAILED - ERR_FORWARD_TO_OTHERS - ERR_OBJECT_NOT_FOUND - ERR_HANDLER_NOT_FOUND - ERR_LEARN_FILE_FAILED - ERR_GET_LEARN_STATE_FAILED - ERR_INVALID_VERSION - ERR_INVALID_PARAMETERS - ERR_CAPACITY_EXCEEDED - ERR_INVALID_STATE - ERR_INACTIVE_STATE - ERR_NOT_ENOUGH_MEMBER - ERR_FILE_OPERATION_FAILED - ERR_HANDLE_EOF - ERR_WRONG_CHECKSUM - ERR_INVALID_DATA - ERR_INVALID_HANDLE - ERR_INCOMPLETE_DATA - ERR_VERSION_OUTDATED - ERR_PATH_NOT_FOUND - ERR_PATH_ALREADY_EXIST - ERR_ADDRESS_ALREADY_USED - ERR_STATE_FREEZED - ERR_LOCAL_APP_FAILURE - ERR_BIND_IOCP_FAILED - ERR_NETWORK_START_FAILED - ERR_NOT_IMPLEMENTED - ERR_CHECKPOINT_FAILED - ERR_WRONG_TIMING - ERR_NO_NEED_OPERATE - ERR_CORRUPTION - ERR_TRY_AGAIN - ERR_CLUSTER_NOT_FOUND - ERR_CLUSTER_ALREADY_EXIST - ERR_SERVICE_ALREADY_EXIST - ERR_INJECTED - ERR_NETWORK_FAILURE - ERR_UNDER_RECOVERY - ERR_OPERATION_DISABLED - ERR_ZOOKEEPER_OPERATION -) - -func (e DsnErrCode) Error() string { - return fmt.Sprintf("[%s]", e.String()) -} - -func (ec *ErrorCode) Read(iprot thrift.TProtocol) (err error) { - ec.Errno, err = iprot.ReadString() - return -} - -func (ec *ErrorCode) Write(oprot thrift.TProtocol) error { - return oprot.WriteString(ec.Errno) -} - -func (ec *ErrorCode) String() string { - if ec == nil { - return "" - } - return fmt.Sprintf("ErrorCode(%+v)", *ec) -} - -//go:generate enumer -type=RocksDBErrCode -output=rocskdb_err_string.go -type RocksDBErrCode int32 - -const ( - Ok RocksDBErrCode = iota - NotFound - Corruption - NotSupported - InvalidArgument - IOError - MergeInProgress - Incomplete - ShutdownInProgress - TimedOut - Aborted - Busy - Expired - TryAgain -) - -func NewRocksDBErrFromInt(e int32) error { - err := RocksDBErrCode(e) - if err == Ok { - return nil - } - return err -} - -func (e RocksDBErrCode) Error() string { - return fmt.Sprintf("ROCSKDB_ERR(%s)", e.String()) -} diff --git a/vendor/github.com/XiaoMi/pegasus-go-client/idl/base/gpid.go b/vendor/github.com/XiaoMi/pegasus-go-client/idl/base/gpid.go deleted file mode 100644 index d4029c6..0000000 --- a/vendor/github.com/XiaoMi/pegasus-go-client/idl/base/gpid.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright (c) 2017, Xiaomi, Inc. All rights reserved. -// This source code is licensed under the Apache License Version 2.0, which -// can be found in the LICENSE file in the root directory of this source tree. 
- -package base - -import ( - "fmt" - - "github.com/pegasus-kv/thrift/lib/go/thrift" -) - -type Gpid struct { - Appid, PartitionIndex int32 -} - -func (id *Gpid) Read(iprot thrift.TProtocol) error { - v, err := iprot.ReadI64() - if err != nil { - return err - } - - id.Appid = int32(v & int64(0x00000000ffffffff)) - id.PartitionIndex = int32(v >> 32) - return nil -} - -func (id *Gpid) Write(oprot thrift.TProtocol) error { - v := int64(id.Appid) + int64(id.PartitionIndex)<<32 - return oprot.WriteI64(v) -} - -func (id *Gpid) String() string { - if id == nil { - return "" - } - return fmt.Sprintf("%+v", *id) -} diff --git a/vendor/github.com/XiaoMi/pegasus-go-client/idl/base/rocskdb_err_string.go b/vendor/github.com/XiaoMi/pegasus-go-client/idl/base/rocskdb_err_string.go deleted file mode 100644 index 6c638f0..0000000 --- a/vendor/github.com/XiaoMi/pegasus-go-client/idl/base/rocskdb_err_string.go +++ /dev/null @@ -1,44 +0,0 @@ -// Code generated by "enumer -type=RocksDBErrCode -output=rocskdb_err_string.go"; DO NOT EDIT - -package base - -import ( - "fmt" -) - -// Pegasus always returns rocksdb error in `resp.Error`, for data operations like PUT, GET. - -const _RocksDBErrCode_name = "OkNotFoundCorruptionNotSupportedInvalidArgumentIOErrorMergeInProgressIncompleteShutdownInProgressTimedOutAbortedBusyExpiredTryAgain" - -var _RocksDBErrCode_index = [...]uint8{0, 2, 10, 20, 32, 47, 54, 69, 79, 97, 105, 112, 116, 123, 131} - -func (i RocksDBErrCode) String() string { - if i < 0 || i >= RocksDBErrCode(len(_RocksDBErrCode_index)-1) { - return fmt.Sprintf("RocksDBErrCode(%d)", i) - } - return _RocksDBErrCode_name[_RocksDBErrCode_index[i]:_RocksDBErrCode_index[i+1]] -} - -var _RocksDBErrCodeNameToValue_map = map[string]RocksDBErrCode{ - _RocksDBErrCode_name[0:2]: 0, - _RocksDBErrCode_name[2:10]: 1, - _RocksDBErrCode_name[10:20]: 2, - _RocksDBErrCode_name[20:32]: 3, - _RocksDBErrCode_name[32:47]: 4, - _RocksDBErrCode_name[47:54]: 5, - _RocksDBErrCode_name[54:69]: 6, - _RocksDBErrCode_name[69:79]: 7, - _RocksDBErrCode_name[79:97]: 8, - _RocksDBErrCode_name[97:105]: 9, - _RocksDBErrCode_name[105:112]: 10, - _RocksDBErrCode_name[112:116]: 11, - _RocksDBErrCode_name[116:123]: 12, - _RocksDBErrCode_name[123:131]: 13, -} - -func RocksDBErrCodeString(s string) (RocksDBErrCode, error) { - if val, ok := _RocksDBErrCodeNameToValue_map[s]; ok { - return val, nil - } - return 0, fmt.Errorf("%s does not belong to RocksDBErrCode values", s) -} diff --git a/vendor/github.com/XiaoMi/pegasus-go-client/idl/base/rpc_address.go b/vendor/github.com/XiaoMi/pegasus-go-client/idl/base/rpc_address.go deleted file mode 100644 index 673b025..0000000 --- a/vendor/github.com/XiaoMi/pegasus-go-client/idl/base/rpc_address.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright (c) 2017, Xiaomi, Inc. All rights reserved. -// This source code is licensed under the Apache License Version 2.0, which -// can be found in the LICENSE file in the root directory of this source tree. 
- -package base - -import ( - "encoding/binary" - "fmt" - "net" - - "github.com/pegasus-kv/thrift/lib/go/thrift" -) - -type RPCAddress struct { - address int64 -} - -func NewRPCAddress(ip net.IP, port int) *RPCAddress { - return &RPCAddress{ - address: (int64(binary.BigEndian.Uint32(ip.To4())) << 32) + (int64(port) << 16) + 1, - } -} - -func (r *RPCAddress) Read(iprot thrift.TProtocol) error { - address, err := iprot.ReadI64() - if err != nil { - return err - } - r.address = address - return nil -} - -func (r *RPCAddress) Write(oprot thrift.TProtocol) error { - return oprot.WriteI64(r.address) -} - -func (r *RPCAddress) String() string { - if r == nil { - return "" - } - return fmt.Sprintf("RPCAddress(%s)", r.GetAddress()) -} - -func (r *RPCAddress) getIp() net.IP { - return net.IPv4(byte(0xff&(r.address>>56)), byte(0xff&(r.address>>48)), byte(0xff&(r.address>>40)), byte(0xff&(r.address>>32))) -} - -func (r *RPCAddress) getPort() int { - return int(0xffff & (r.address >> 16)) -} - -func (r *RPCAddress) GetAddress() string { - return fmt.Sprintf("%s:%d", r.getIp(), r.getPort()) -} - -func (r *RPCAddress) GetRawAddress() int64 { - return r.address -} diff --git a/vendor/github.com/XiaoMi/pegasus-go-client/idl/cmd/GoUnusedProtection__.go b/vendor/github.com/XiaoMi/pegasus-go-client/idl/cmd/GoUnusedProtection__.go deleted file mode 100644 index b15aabc..0000000 --- a/vendor/github.com/XiaoMi/pegasus-go-client/idl/cmd/GoUnusedProtection__.go +++ /dev/null @@ -1,6 +0,0 @@ -// Autogenerated by Thrift Compiler (0.13.0) -// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - -package cmd - -var GoUnusedProtection__ int diff --git a/vendor/github.com/XiaoMi/pegasus-go-client/idl/cmd/cmd-consts.go b/vendor/github.com/XiaoMi/pegasus-go-client/idl/cmd/cmd-consts.go deleted file mode 100644 index 1e7e8bd..0000000 --- a/vendor/github.com/XiaoMi/pegasus-go-client/idl/cmd/cmd-consts.go +++ /dev/null @@ -1,22 +0,0 @@ -// Autogenerated by Thrift Compiler (0.13.0) -// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - -package cmd - -import ( - "bytes" - "context" - "fmt" - "github.com/pegasus-kv/thrift/lib/go/thrift" - "reflect" -) - -// (needed to ensure safety because of naive import list construction.) -var _ = thrift.ZERO -var _ = fmt.Printf -var _ = context.Background -var _ = reflect.DeepEqual -var _ = bytes.Equal - -func init() { -} diff --git a/vendor/github.com/XiaoMi/pegasus-go-client/idl/cmd/cmd.go b/vendor/github.com/XiaoMi/pegasus-go-client/idl/cmd/cmd.go deleted file mode 100644 index b8541e7..0000000 --- a/vendor/github.com/XiaoMi/pegasus-go-client/idl/cmd/cmd.go +++ /dev/null @@ -1,535 +0,0 @@ -// Autogenerated by Thrift Compiler (0.13.0) -// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - -package cmd - -import ( - "bytes" - "context" - "fmt" - "github.com/pegasus-kv/thrift/lib/go/thrift" - "reflect" -) - -// (needed to ensure safety because of naive import list construction.) 
-var _ = thrift.ZERO -var _ = fmt.Printf -var _ = context.Background -var _ = reflect.DeepEqual -var _ = bytes.Equal - -// Attributes: -// - Cmd -// - Arguments -type Command struct { - Cmd string `thrift:"cmd,1" db:"cmd" json:"cmd"` - Arguments []string `thrift:"arguments,2" db:"arguments" json:"arguments"` -} - -func NewCommand() *Command { - return &Command{} -} - -func (p *Command) GetCmd() string { - return p.Cmd -} - -func (p *Command) GetArguments() []string { - return p.Arguments -} -func (p *Command) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.STRING { - if err := p.ReadField1(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 2: - if fieldTypeId == thrift.LIST { - if err := p.ReadField2(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *Command) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.Cmd = v - } - return nil -} - -func (p *Command) ReadField2(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]string, 0, size) - p.Arguments = tSlice - for i := 0; i < size; i++ { - var _elem0 string - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 0: ", err) - } else { - _elem0 = v - } - p.Arguments = append(p.Arguments, _elem0) - } - if err := iprot.ReadListEnd(); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *Command) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("command"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *Command) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("cmd", thrift.STRING, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:cmd: ", p), err) - } - if err := oprot.WriteString(string(p.Cmd)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.cmd (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:cmd: ", p), err) - } - 
return err -} - -func (p *Command) writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("arguments", thrift.LIST, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:arguments: ", p), err) - } - if err := oprot.WriteListBegin(thrift.STRING, len(p.Arguments)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.Arguments { - if err := oprot.WriteString(string(v)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err) - } - } - if err := oprot.WriteListEnd(); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:arguments: ", p), err) - } - return err -} - -func (p *Command) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("Command(%+v)", *p) -} - -type RemoteCmdService interface { - // Parameters: - // - Cmd - CallCommand(ctx context.Context, cmd *Command) (r string, err error) -} - -type RemoteCmdServiceClient struct { - c thrift.TClient -} - -func NewRemoteCmdServiceClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *RemoteCmdServiceClient { - return &RemoteCmdServiceClient{ - c: thrift.NewTStandardClient(f.GetProtocol(t), f.GetProtocol(t)), - } -} - -func NewRemoteCmdServiceClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *RemoteCmdServiceClient { - return &RemoteCmdServiceClient{ - c: thrift.NewTStandardClient(iprot, oprot), - } -} - -func NewRemoteCmdServiceClient(c thrift.TClient) *RemoteCmdServiceClient { - return &RemoteCmdServiceClient{ - c: c, - } -} - -func (p *RemoteCmdServiceClient) Client_() thrift.TClient { - return p.c -} - -// Parameters: -// - Cmd -func (p *RemoteCmdServiceClient) CallCommand(ctx context.Context, cmd *Command) (r string, err error) { - var _args1 RemoteCmdServiceCallCommandArgs - _args1.Cmd = cmd - var _result2 RemoteCmdServiceCallCommandResult - if err = p.Client_().Call(ctx, "callCommand", &_args1, &_result2); err != nil { - return - } - return _result2.GetSuccess(), nil -} - -type RemoteCmdServiceProcessor struct { - processorMap map[string]thrift.TProcessorFunction - handler RemoteCmdService -} - -func (p *RemoteCmdServiceProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) { - p.processorMap[key] = processor -} - -func (p *RemoteCmdServiceProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) { - processor, ok = p.processorMap[key] - return processor, ok -} - -func (p *RemoteCmdServiceProcessor) ProcessorMap() map[string]thrift.TProcessorFunction { - return p.processorMap -} - -func NewRemoteCmdServiceProcessor(handler RemoteCmdService) *RemoteCmdServiceProcessor { - - self3 := &RemoteCmdServiceProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)} - self3.processorMap["callCommand"] = &remoteCmdServiceProcessorCallCommand{handler: handler} - return self3 -} - -func (p *RemoteCmdServiceProcessor) Process(ctx context.Context, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - name, _, seqId, err := iprot.ReadMessageBegin() - if err != nil { - return false, err - } - if processor, ok := p.GetProcessorFunction(name); ok { - return processor.Process(ctx, seqId, iprot, oprot) - } - iprot.Skip(thrift.STRUCT) - iprot.ReadMessageEnd() - x4 := 
thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name) - oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId) - x4.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return false, x4 - -} - -type remoteCmdServiceProcessorCallCommand struct { - handler RemoteCmdService -} - -func (p *remoteCmdServiceProcessorCallCommand) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := RemoteCmdServiceCallCommandArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("callCommand", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return false, err - } - - iprot.ReadMessageEnd() - result := RemoteCmdServiceCallCommandResult{} - var retval string - var err2 error - if retval, err2 = p.handler.CallCommand(ctx, args.Cmd); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing callCommand: "+err2.Error()) - oprot.WriteMessageBegin("callCommand", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return true, err2 - } else { - result.Success = &retval - } - if err2 = oprot.WriteMessageBegin("callCommand", thrift.REPLY, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = err2 - } - if err != nil { - return - } - return true, err -} - -// HELPER FUNCTIONS AND STRUCTURES - -// Attributes: -// - Cmd -type RemoteCmdServiceCallCommandArgs struct { - Cmd *Command `thrift:"cmd,1" db:"cmd" json:"cmd"` -} - -func NewRemoteCmdServiceCallCommandArgs() *RemoteCmdServiceCallCommandArgs { - return &RemoteCmdServiceCallCommandArgs{} -} - -var RemoteCmdServiceCallCommandArgs_Cmd_DEFAULT *Command - -func (p *RemoteCmdServiceCallCommandArgs) GetCmd() *Command { - if !p.IsSetCmd() { - return RemoteCmdServiceCallCommandArgs_Cmd_DEFAULT - } - return p.Cmd -} -func (p *RemoteCmdServiceCallCommandArgs) IsSetCmd() bool { - return p.Cmd != nil -} - -func (p *RemoteCmdServiceCallCommandArgs) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField1(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *RemoteCmdServiceCallCommandArgs) ReadField1(iprot thrift.TProtocol) error { - p.Cmd = &Command{} - if err := p.Cmd.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Cmd), err) - } - return nil -} - -func (p *RemoteCmdServiceCallCommandArgs) Write(oprot 
thrift.TProtocol) error { - if err := oprot.WriteStructBegin("callCommand_args"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *RemoteCmdServiceCallCommandArgs) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("cmd", thrift.STRUCT, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:cmd: ", p), err) - } - if err := p.Cmd.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Cmd), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:cmd: ", p), err) - } - return err -} - -func (p *RemoteCmdServiceCallCommandArgs) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("RemoteCmdServiceCallCommandArgs(%+v)", *p) -} - -// Attributes: -// - Success -type RemoteCmdServiceCallCommandResult struct { - Success *string `thrift:"success,0" db:"success" json:"success,omitempty"` -} - -func NewRemoteCmdServiceCallCommandResult() *RemoteCmdServiceCallCommandResult { - return &RemoteCmdServiceCallCommandResult{} -} - -var RemoteCmdServiceCallCommandResult_Success_DEFAULT string - -func (p *RemoteCmdServiceCallCommandResult) GetSuccess() string { - if !p.IsSetSuccess() { - return RemoteCmdServiceCallCommandResult_Success_DEFAULT - } - return *p.Success -} -func (p *RemoteCmdServiceCallCommandResult) IsSetSuccess() bool { - return p.Success != nil -} - -func (p *RemoteCmdServiceCallCommandResult) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 0: - if fieldTypeId == thrift.STRING { - if err := p.ReadField0(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *RemoteCmdServiceCallCommandResult) ReadField0(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 0: ", err) - } else { - p.Success = &v - } - return nil -} - -func (p *RemoteCmdServiceCallCommandResult) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("callCommand_result"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField0(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return 
thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *RemoteCmdServiceCallCommandResult) writeField0(oprot thrift.TProtocol) (err error) { - if p.IsSetSuccess() { - if err := oprot.WriteFieldBegin("success", thrift.STRING, 0); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) - } - if err := oprot.WriteString(string(*p.Success)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.success (0) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) - } - } - return err -} - -func (p *RemoteCmdServiceCallCommandResult) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("RemoteCmdServiceCallCommandResult(%+v)", *p) -} diff --git a/vendor/github.com/XiaoMi/pegasus-go-client/idl/radmin/GoUnusedProtection__.go b/vendor/github.com/XiaoMi/pegasus-go-client/idl/radmin/GoUnusedProtection__.go deleted file mode 100644 index c460900..0000000 --- a/vendor/github.com/XiaoMi/pegasus-go-client/idl/radmin/GoUnusedProtection__.go +++ /dev/null @@ -1,6 +0,0 @@ -// Autogenerated by Thrift Compiler (0.13.0) -// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - -package radmin - -var GoUnusedProtection__ int diff --git a/vendor/github.com/XiaoMi/pegasus-go-client/idl/radmin/radmin-consts.go b/vendor/github.com/XiaoMi/pegasus-go-client/idl/radmin/radmin-consts.go deleted file mode 100644 index 159af7d..0000000 --- a/vendor/github.com/XiaoMi/pegasus-go-client/idl/radmin/radmin-consts.go +++ /dev/null @@ -1,25 +0,0 @@ -// Autogenerated by Thrift Compiler (0.13.0) -// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - -package radmin - -import ( - "bytes" - "context" - "fmt" - "github.com/XiaoMi/pegasus-go-client/idl/base" - "github.com/pegasus-kv/thrift/lib/go/thrift" - "reflect" -) - -// (needed to ensure safety because of naive import list construction.) -var _ = thrift.ZERO -var _ = fmt.Printf -var _ = context.Background -var _ = reflect.DeepEqual -var _ = bytes.Equal - -var _ = base.GoUnusedProtection__ - -func init() { -} diff --git a/vendor/github.com/XiaoMi/pegasus-go-client/idl/radmin/radmin.go b/vendor/github.com/XiaoMi/pegasus-go-client/idl/radmin/radmin.go deleted file mode 100644 index 2a5a0ba..0000000 --- a/vendor/github.com/XiaoMi/pegasus-go-client/idl/radmin/radmin.go +++ /dev/null @@ -1,1815 +0,0 @@ -// Autogenerated by Thrift Compiler (0.13.0) -// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - -package radmin - -import ( - "bytes" - "context" - "fmt" - "github.com/XiaoMi/pegasus-go-client/idl/base" - "github.com/pegasus-kv/thrift/lib/go/thrift" - "reflect" -) - -// (needed to ensure safety because of naive import list construction.) 
-var _ = thrift.ZERO
-var _ = fmt.Printf
-var _ = context.Background
-var _ = reflect.DeepEqual
-var _ = bytes.Equal
-
-var _ = base.GoUnusedProtection__
-
[... rest of this deleted Thrift-generated file: the DiskInfo, QueryDiskInfoRequest, QueryDiskInfoResponse, ReplicaDiskMigrateRequest and ReplicaDiskMigrateResponse structs with their generated getters and Read/Write boilerplate, plus the ReplicaClient interface, ReplicaClientClient, ReplicaClientProcessor and the query_disk_info / disk_migrate args and result helper types ...]
diff --git a/vendor/github.com/XiaoMi/pegasus-go-client/idl/replication/GoUnusedProtection__.go b/vendor/github.com/XiaoMi/pegasus-go-client/idl/replication/GoUnusedProtection__.go
deleted file mode 100644
index 20c1f2d..0000000
--- a/vendor/github.com/XiaoMi/pegasus-go-client/idl/replication/GoUnusedProtection__.go
+++ /dev/null
@@ -1,6 +0,0 @@
-// Autogenerated by Thrift Compiler (0.13.0)
-// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
-
-package replication
-
-var GoUnusedProtection__ int
diff --git a/vendor/github.com/XiaoMi/pegasus-go-client/idl/replication/replication-consts.go b/vendor/github.com/XiaoMi/pegasus-go-client/idl/replication/replication-consts.go
deleted file mode 100644
index cebd74e..0000000
--- a/vendor/github.com/XiaoMi/pegasus-go-client/idl/replication/replication-consts.go
+++ /dev/null
@@ -1,25 +0,0 @@
-// Autogenerated by Thrift Compiler (0.13.0)
-// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
-
-package replication
-
-import (
-	"bytes"
-	"context"
-	"fmt"
-	"github.com/XiaoMi/pegasus-go-client/idl/base"
-	"github.com/pegasus-kv/thrift/lib/go/thrift"
-	"reflect"
-)
-
-// (needed to ensure safety because of naive import list construction.)
-var _ = thrift.ZERO
-var _ = fmt.Printf
-var _ = context.Background
-var _ = reflect.DeepEqual
-var _ = bytes.Equal
-
-var _ = base.GoUnusedProtection__
-
-func init() {
-}
diff --git a/vendor/github.com/XiaoMi/pegasus-go-client/idl/replication/replication.go b/vendor/github.com/XiaoMi/pegasus-go-client/idl/replication/replication.go
deleted file mode 100644
index 1e0df68..0000000
--- a/vendor/github.com/XiaoMi/pegasus-go-client/idl/replication/replication.go
+++ /dev/null
@@ -1,872 +0,0 @@
-// Autogenerated by Thrift Compiler (0.13.0)
-// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
-
-package replication
-
-import (
-	"bytes"
-	"context"
-	"fmt"
-	"github.com/XiaoMi/pegasus-go-client/idl/base"
-	"github.com/pegasus-kv/thrift/lib/go/thrift"
-	"reflect"
-)
-
-// (needed to ensure safety because of naive import list construction.)
-var _ = thrift.ZERO
-var _ = fmt.Printf
-var _ = context.Background
-var _ = reflect.DeepEqual
-var _ = bytes.Equal
-
-var _ = base.GoUnusedProtection__
-
[... deleted autogenerated structs continue: PartitionConfiguration (pid, ballot, max_replica_count, primary, secondaries, last_drops, last_committed_decree) and QueryCfgRequest (app_name, partition_indices), with their generated getters and Read/Write boilerplate ...]
-func (p *QueryCfgRequest) ReadField1(iprot thrift.TProtocol) error {
-	if v, err := iprot.ReadString(); err != nil {
-		return thrift.PrependError("error reading field 1: ", err)
-	} else {
-		p.AppName = v
- } - return nil -} - -func (p *QueryCfgRequest) ReadField2(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]int32, 0, size) - p.PartitionIndices = tSlice - for i := 0; i < size; i++ { - var _elem2 int32 - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 0: ", err) - } else { - _elem2 = v - } - p.PartitionIndices = append(p.PartitionIndices, _elem2) - } - if err := iprot.ReadListEnd(); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *QueryCfgRequest) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("query_cfg_request"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *QueryCfgRequest) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("app_name", thrift.STRING, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:app_name: ", p), err) - } - if err := oprot.WriteString(string(p.AppName)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.app_name (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:app_name: ", p), err) - } - return err -} - -func (p *QueryCfgRequest) writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("partition_indices", thrift.LIST, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:partition_indices: ", p), err) - } - if err := oprot.WriteListBegin(thrift.I32, len(p.PartitionIndices)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.PartitionIndices { - if err := oprot.WriteI32(int32(v)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T. 
(0) field write error: ", p), err) - } - } - if err := oprot.WriteListEnd(); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:partition_indices: ", p), err) - } - return err -} - -func (p *QueryCfgRequest) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("QueryCfgRequest(%+v)", *p) -} - -// Attributes: -// - Err -// - AppID -// - PartitionCount -// - IsStateful -// - Partitions -type QueryCfgResponse struct { - Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` - AppID int32 `thrift:"app_id,2" db:"app_id" json:"app_id"` - PartitionCount int32 `thrift:"partition_count,3" db:"partition_count" json:"partition_count"` - IsStateful bool `thrift:"is_stateful,4" db:"is_stateful" json:"is_stateful"` - Partitions []*PartitionConfiguration `thrift:"partitions,5" db:"partitions" json:"partitions"` -} - -func NewQueryCfgResponse() *QueryCfgResponse { - return &QueryCfgResponse{} -} - -var QueryCfgResponse_Err_DEFAULT *base.ErrorCode - -func (p *QueryCfgResponse) GetErr() *base.ErrorCode { - if !p.IsSetErr() { - return QueryCfgResponse_Err_DEFAULT - } - return p.Err -} - -func (p *QueryCfgResponse) GetAppID() int32 { - return p.AppID -} - -func (p *QueryCfgResponse) GetPartitionCount() int32 { - return p.PartitionCount -} - -func (p *QueryCfgResponse) GetIsStateful() bool { - return p.IsStateful -} - -func (p *QueryCfgResponse) GetPartitions() []*PartitionConfiguration { - return p.Partitions -} -func (p *QueryCfgResponse) IsSetErr() bool { - return p.Err != nil -} - -func (p *QueryCfgResponse) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField1(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 2: - if fieldTypeId == thrift.I32 { - if err := p.ReadField2(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 3: - if fieldTypeId == thrift.I32 { - if err := p.ReadField3(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 4: - if fieldTypeId == thrift.BOOL { - if err := p.ReadField4(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 5: - if fieldTypeId == thrift.LIST { - if err := p.ReadField5(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *QueryCfgResponse) ReadField1(iprot thrift.TProtocol) error { - p.Err = &base.ErrorCode{} - if err := p.Err.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), 
err) - } - return nil -} - -func (p *QueryCfgResponse) ReadField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 2: ", err) - } else { - p.AppID = v - } - return nil -} - -func (p *QueryCfgResponse) ReadField3(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 3: ", err) - } else { - p.PartitionCount = v - } - return nil -} - -func (p *QueryCfgResponse) ReadField4(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { - return thrift.PrependError("error reading field 4: ", err) - } else { - p.IsStateful = v - } - return nil -} - -func (p *QueryCfgResponse) ReadField5(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*PartitionConfiguration, 0, size) - p.Partitions = tSlice - for i := 0; i < size; i++ { - _elem3 := &PartitionConfiguration{} - if err := _elem3.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem3), err) - } - p.Partitions = append(p.Partitions, _elem3) - } - if err := iprot.ReadListEnd(); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *QueryCfgResponse) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("query_cfg_response"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - if err := p.writeField3(oprot); err != nil { - return err - } - if err := p.writeField4(oprot); err != nil { - return err - } - if err := p.writeField5(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *QueryCfgResponse) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) - } - if err := p.Err.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) - } - return err -} - -func (p *QueryCfgResponse) writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("app_id", thrift.I32, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:app_id: ", p), err) - } - if err := oprot.WriteI32(int32(p.AppID)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.app_id (2) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:app_id: ", p), err) - } - return err -} - -func (p *QueryCfgResponse) writeField3(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("partition_count", thrift.I32, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:partition_count: ", p), err) - } - if err := 
diff --git a/vendor/github.com/XiaoMi/pegasus-go-client/idl/rrdb/GoUnusedProtection__.go b/vendor/github.com/XiaoMi/pegasus-go-client/idl/rrdb/GoUnusedProtection__.go
deleted file mode 100644
index ba17969..0000000
--- a/vendor/github.com/XiaoMi/pegasus-go-client/idl/rrdb/GoUnusedProtection__.go
+++ /dev/null
@@ -1,6 +0,0 @@
-// Autogenerated by Thrift Compiler (0.13.0)
-// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
-
-package rrdb
-
-var GoUnusedProtection__ int
diff --git a/vendor/github.com/XiaoMi/pegasus-go-client/idl/rrdb/rrdb-consts.go b/vendor/github.com/XiaoMi/pegasus-go-client/idl/rrdb/rrdb-consts.go
deleted file mode 100644
index 7e08a4e..0000000
--- a/vendor/github.com/XiaoMi/pegasus-go-client/idl/rrdb/rrdb-consts.go
+++ /dev/null
@@ -1,27 +0,0 @@
-// Autogenerated by Thrift Compiler (0.13.0)
-// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
-
-package rrdb
-
-import (
-	"bytes"
-	"context"
-	"fmt"
-	"github.com/XiaoMi/pegasus-go-client/idl/base"
-	"github.com/XiaoMi/pegasus-go-client/idl/replication"
-	"github.com/pegasus-kv/thrift/lib/go/thrift"
-	"reflect"
-)
-
-// (needed to ensure safety because of naive import list construction.)
-var _ = thrift.ZERO
-var _ = fmt.Printf
-var _ = context.Background
-var _ = reflect.DeepEqual
-var _ = bytes.Equal
-
-var _ = base.GoUnusedProtection__
-var _ = replication.GoUnusedProtection__
-
-func init() {
-}
diff --git a/vendor/github.com/XiaoMi/pegasus-go-client/idl/rrdb/rrdb.go b/vendor/github.com/XiaoMi/pegasus-go-client/idl/rrdb/rrdb.go
deleted file mode 100644
index 89ea94d..0000000
--- a/vendor/github.com/XiaoMi/pegasus-go-client/idl/rrdb/rrdb.go
+++ /dev/null
@@ -1,9412 +0,0 @@
-// Autogenerated by Thrift Compiler (0.13.0)
-// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
error 1:key: ", p), err) - } - return err -} - -func (p *KeyValue) writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("value", thrift.STRUCT, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:value: ", p), err) - } - if err := p.Value.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Value), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:value: ", p), err) - } - return err -} - -func (p *KeyValue) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("KeyValue(%+v)", *p) -} - -// Attributes: -// - HashKey -// - Kvs -// - ExpireTsSeconds -type MultiPutRequest struct { - HashKey *base.Blob `thrift:"hash_key,1" db:"hash_key" json:"hash_key"` - Kvs []*KeyValue `thrift:"kvs,2" db:"kvs" json:"kvs"` - ExpireTsSeconds int32 `thrift:"expire_ts_seconds,3" db:"expire_ts_seconds" json:"expire_ts_seconds"` -} - -func NewMultiPutRequest() *MultiPutRequest { - return &MultiPutRequest{} -} - -var MultiPutRequest_HashKey_DEFAULT *base.Blob - -func (p *MultiPutRequest) GetHashKey() *base.Blob { - if !p.IsSetHashKey() { - return MultiPutRequest_HashKey_DEFAULT - } - return p.HashKey -} - -func (p *MultiPutRequest) GetKvs() []*KeyValue { - return p.Kvs -} - -func (p *MultiPutRequest) GetExpireTsSeconds() int32 { - return p.ExpireTsSeconds -} -func (p *MultiPutRequest) IsSetHashKey() bool { - return p.HashKey != nil -} - -func (p *MultiPutRequest) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField1(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 2: - if fieldTypeId == thrift.LIST { - if err := p.ReadField2(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 3: - if fieldTypeId == thrift.I32 { - if err := p.ReadField3(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *MultiPutRequest) ReadField1(iprot thrift.TProtocol) error { - p.HashKey = &base.Blob{} - if err := p.HashKey.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.HashKey), err) - } - return nil -} - -func (p *MultiPutRequest) ReadField2(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*KeyValue, 0, size) - p.Kvs = tSlice - for i := 0; i < size; i++ { - _elem0 := &KeyValue{} - if err := _elem0.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem0), err) - } - p.Kvs = append(p.Kvs, 
_elem0) - } - if err := iprot.ReadListEnd(); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *MultiPutRequest) ReadField3(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 3: ", err) - } else { - p.ExpireTsSeconds = v - } - return nil -} - -func (p *MultiPutRequest) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("multi_put_request"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - if err := p.writeField3(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *MultiPutRequest) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("hash_key", thrift.STRUCT, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:hash_key: ", p), err) - } - if err := p.HashKey.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.HashKey), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:hash_key: ", p), err) - } - return err -} - -func (p *MultiPutRequest) writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("kvs", thrift.LIST, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:kvs: ", p), err) - } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Kvs)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.Kvs { - if err := v.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:kvs: ", p), err) - } - return err -} - -func (p *MultiPutRequest) writeField3(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("expire_ts_seconds", thrift.I32, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:expire_ts_seconds: ", p), err) - } - if err := oprot.WriteI32(int32(p.ExpireTsSeconds)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.expire_ts_seconds (3) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:expire_ts_seconds: ", p), err) - } - return err -} - -func (p *MultiPutRequest) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("MultiPutRequest(%+v)", *p) -} - -// Attributes: -// - HashKey -// - SorkKeys -// - MaxCount -type MultiRemoveRequest struct { - HashKey *base.Blob `thrift:"hash_key,1" db:"hash_key" json:"hash_key"` - SorkKeys []*base.Blob `thrift:"sork_keys,2" db:"sork_keys" json:"sork_keys"` - MaxCount int64 `thrift:"max_count,3" db:"max_count" json:"max_count"` -} - -func NewMultiRemoveRequest() *MultiRemoveRequest { - return 
&MultiRemoveRequest{} -} - -var MultiRemoveRequest_HashKey_DEFAULT *base.Blob - -func (p *MultiRemoveRequest) GetHashKey() *base.Blob { - if !p.IsSetHashKey() { - return MultiRemoveRequest_HashKey_DEFAULT - } - return p.HashKey -} - -func (p *MultiRemoveRequest) GetSorkKeys() []*base.Blob { - return p.SorkKeys -} - -func (p *MultiRemoveRequest) GetMaxCount() int64 { - return p.MaxCount -} -func (p *MultiRemoveRequest) IsSetHashKey() bool { - return p.HashKey != nil -} - -func (p *MultiRemoveRequest) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField1(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 2: - if fieldTypeId == thrift.LIST { - if err := p.ReadField2(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 3: - if fieldTypeId == thrift.I64 { - if err := p.ReadField3(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *MultiRemoveRequest) ReadField1(iprot thrift.TProtocol) error { - p.HashKey = &base.Blob{} - if err := p.HashKey.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.HashKey), err) - } - return nil -} - -func (p *MultiRemoveRequest) ReadField2(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*base.Blob, 0, size) - p.SorkKeys = tSlice - for i := 0; i < size; i++ { - _elem1 := &base.Blob{} - if err := _elem1.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem1), err) - } - p.SorkKeys = append(p.SorkKeys, _elem1) - } - if err := iprot.ReadListEnd(); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *MultiRemoveRequest) ReadField3(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return thrift.PrependError("error reading field 3: ", err) - } else { - p.MaxCount = v - } - return nil -} - -func (p *MultiRemoveRequest) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("multi_remove_request"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - if err := p.writeField3(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) 
- } - return nil -} - -func (p *MultiRemoveRequest) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("hash_key", thrift.STRUCT, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:hash_key: ", p), err) - } - if err := p.HashKey.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.HashKey), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:hash_key: ", p), err) - } - return err -} - -func (p *MultiRemoveRequest) writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("sork_keys", thrift.LIST, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:sork_keys: ", p), err) - } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.SorkKeys)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.SorkKeys { - if err := v.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:sork_keys: ", p), err) - } - return err -} - -func (p *MultiRemoveRequest) writeField3(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("max_count", thrift.I64, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:max_count: ", p), err) - } - if err := oprot.WriteI64(int64(p.MaxCount)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.max_count (3) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:max_count: ", p), err) - } - return err -} - -func (p *MultiRemoveRequest) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("MultiRemoveRequest(%+v)", *p) -} - -// Attributes: -// - Error -// - Count -// - AppID -// - PartitionIndex -// - Decree -// - Server -type MultiRemoveResponse struct { - Error int32 `thrift:"error,1" db:"error" json:"error"` - Count int64 `thrift:"count,2" db:"count" json:"count"` - AppID int32 `thrift:"app_id,3" db:"app_id" json:"app_id"` - PartitionIndex int32 `thrift:"partition_index,4" db:"partition_index" json:"partition_index"` - Decree int64 `thrift:"decree,5" db:"decree" json:"decree"` - Server string `thrift:"server,6" db:"server" json:"server"` -} - -func NewMultiRemoveResponse() *MultiRemoveResponse { - return &MultiRemoveResponse{} -} - -func (p *MultiRemoveResponse) GetError() int32 { - return p.Error -} - -func (p *MultiRemoveResponse) GetCount() int64 { - return p.Count -} - -func (p *MultiRemoveResponse) GetAppID() int32 { - return p.AppID -} - -func (p *MultiRemoveResponse) GetPartitionIndex() int32 { - return p.PartitionIndex -} - -func (p *MultiRemoveResponse) GetDecree() int64 { - return p.Decree -} - -func (p *MultiRemoveResponse) GetServer() string { - return p.Server -} -func (p *MultiRemoveResponse) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, 
fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.I32 { - if err := p.ReadField1(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 2: - if fieldTypeId == thrift.I64 { - if err := p.ReadField2(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 3: - if fieldTypeId == thrift.I32 { - if err := p.ReadField3(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 4: - if fieldTypeId == thrift.I32 { - if err := p.ReadField4(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 5: - if fieldTypeId == thrift.I64 { - if err := p.ReadField5(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 6: - if fieldTypeId == thrift.STRING { - if err := p.ReadField6(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *MultiRemoveResponse) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.Error = v - } - return nil -} - -func (p *MultiRemoveResponse) ReadField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return thrift.PrependError("error reading field 2: ", err) - } else { - p.Count = v - } - return nil -} - -func (p *MultiRemoveResponse) ReadField3(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 3: ", err) - } else { - p.AppID = v - } - return nil -} - -func (p *MultiRemoveResponse) ReadField4(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 4: ", err) - } else { - p.PartitionIndex = v - } - return nil -} - -func (p *MultiRemoveResponse) ReadField5(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return thrift.PrependError("error reading field 5: ", err) - } else { - p.Decree = v - } - return nil -} - -func (p *MultiRemoveResponse) ReadField6(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 6: ", err) - } else { - p.Server = v - } - return nil -} - -func (p *MultiRemoveResponse) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("multi_remove_response"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - if err := p.writeField3(oprot); err != nil { - return err - } - if err := p.writeField4(oprot); err != nil { - return err - } - if err := p.writeField5(oprot); err != nil { - return err - } - if err := p.writeField6(oprot); err != nil { - return err - } - } - if err := 
oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *MultiRemoveResponse) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("error", thrift.I32, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:error: ", p), err) - } - if err := oprot.WriteI32(int32(p.Error)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.error (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:error: ", p), err) - } - return err -} - -func (p *MultiRemoveResponse) writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("count", thrift.I64, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:count: ", p), err) - } - if err := oprot.WriteI64(int64(p.Count)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.count (2) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:count: ", p), err) - } - return err -} - -func (p *MultiRemoveResponse) writeField3(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("app_id", thrift.I32, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:app_id: ", p), err) - } - if err := oprot.WriteI32(int32(p.AppID)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.app_id (3) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:app_id: ", p), err) - } - return err -} - -func (p *MultiRemoveResponse) writeField4(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("partition_index", thrift.I32, 4); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:partition_index: ", p), err) - } - if err := oprot.WriteI32(int32(p.PartitionIndex)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.partition_index (4) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 4:partition_index: ", p), err) - } - return err -} - -func (p *MultiRemoveResponse) writeField5(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("decree", thrift.I64, 5); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:decree: ", p), err) - } - if err := oprot.WriteI64(int64(p.Decree)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.decree (5) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 5:decree: ", p), err) - } - return err -} - -func (p *MultiRemoveResponse) writeField6(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("server", thrift.STRING, 6); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:server: ", p), err) - } - if err := oprot.WriteString(string(p.Server)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.server (6) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T 
write field end error 6:server: ", p), err) - } - return err -} - -func (p *MultiRemoveResponse) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("MultiRemoveResponse(%+v)", *p) -} - -// Attributes: -// - HashKey -// - SorkKeys -// - MaxKvCount -// - MaxKvSize -// - NoValue -// - StartSortkey -// - StopSortkey -// - StartInclusive -// - StopInclusive -// - SortKeyFilterType -// - SortKeyFilterPattern -// - Reverse -type MultiGetRequest struct { - HashKey *base.Blob `thrift:"hash_key,1" db:"hash_key" json:"hash_key"` - SorkKeys []*base.Blob `thrift:"sork_keys,2" db:"sork_keys" json:"sork_keys"` - MaxKvCount int32 `thrift:"max_kv_count,3" db:"max_kv_count" json:"max_kv_count"` - MaxKvSize int32 `thrift:"max_kv_size,4" db:"max_kv_size" json:"max_kv_size"` - NoValue bool `thrift:"no_value,5" db:"no_value" json:"no_value"` - StartSortkey *base.Blob `thrift:"start_sortkey,6" db:"start_sortkey" json:"start_sortkey"` - StopSortkey *base.Blob `thrift:"stop_sortkey,7" db:"stop_sortkey" json:"stop_sortkey"` - StartInclusive bool `thrift:"start_inclusive,8" db:"start_inclusive" json:"start_inclusive"` - StopInclusive bool `thrift:"stop_inclusive,9" db:"stop_inclusive" json:"stop_inclusive"` - SortKeyFilterType FilterType `thrift:"sort_key_filter_type,10" db:"sort_key_filter_type" json:"sort_key_filter_type"` - SortKeyFilterPattern *base.Blob `thrift:"sort_key_filter_pattern,11" db:"sort_key_filter_pattern" json:"sort_key_filter_pattern"` - Reverse bool `thrift:"reverse,12" db:"reverse" json:"reverse"` -} - -func NewMultiGetRequest() *MultiGetRequest { - return &MultiGetRequest{} -} - -var MultiGetRequest_HashKey_DEFAULT *base.Blob - -func (p *MultiGetRequest) GetHashKey() *base.Blob { - if !p.IsSetHashKey() { - return MultiGetRequest_HashKey_DEFAULT - } - return p.HashKey -} - -func (p *MultiGetRequest) GetSorkKeys() []*base.Blob { - return p.SorkKeys -} - -func (p *MultiGetRequest) GetMaxKvCount() int32 { - return p.MaxKvCount -} - -func (p *MultiGetRequest) GetMaxKvSize() int32 { - return p.MaxKvSize -} - -func (p *MultiGetRequest) GetNoValue() bool { - return p.NoValue -} - -var MultiGetRequest_StartSortkey_DEFAULT *base.Blob - -func (p *MultiGetRequest) GetStartSortkey() *base.Blob { - if !p.IsSetStartSortkey() { - return MultiGetRequest_StartSortkey_DEFAULT - } - return p.StartSortkey -} - -var MultiGetRequest_StopSortkey_DEFAULT *base.Blob - -func (p *MultiGetRequest) GetStopSortkey() *base.Blob { - if !p.IsSetStopSortkey() { - return MultiGetRequest_StopSortkey_DEFAULT - } - return p.StopSortkey -} - -func (p *MultiGetRequest) GetStartInclusive() bool { - return p.StartInclusive -} - -func (p *MultiGetRequest) GetStopInclusive() bool { - return p.StopInclusive -} - -func (p *MultiGetRequest) GetSortKeyFilterType() FilterType { - return p.SortKeyFilterType -} - -var MultiGetRequest_SortKeyFilterPattern_DEFAULT *base.Blob - -func (p *MultiGetRequest) GetSortKeyFilterPattern() *base.Blob { - if !p.IsSetSortKeyFilterPattern() { - return MultiGetRequest_SortKeyFilterPattern_DEFAULT - } - return p.SortKeyFilterPattern -} - -func (p *MultiGetRequest) GetReverse() bool { - return p.Reverse -} -func (p *MultiGetRequest) IsSetHashKey() bool { - return p.HashKey != nil -} - -func (p *MultiGetRequest) IsSetStartSortkey() bool { - return p.StartSortkey != nil -} - -func (p *MultiGetRequest) IsSetStopSortkey() bool { - return p.StopSortkey != nil -} - -func (p *MultiGetRequest) IsSetSortKeyFilterPattern() bool { - return p.SortKeyFilterPattern != nil -} - -func (p *MultiGetRequest) 
Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField1(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 2: - if fieldTypeId == thrift.LIST { - if err := p.ReadField2(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 3: - if fieldTypeId == thrift.I32 { - if err := p.ReadField3(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 4: - if fieldTypeId == thrift.I32 { - if err := p.ReadField4(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 5: - if fieldTypeId == thrift.BOOL { - if err := p.ReadField5(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 6: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField6(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 7: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField7(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 8: - if fieldTypeId == thrift.BOOL { - if err := p.ReadField8(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 9: - if fieldTypeId == thrift.BOOL { - if err := p.ReadField9(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 10: - if fieldTypeId == thrift.I32 { - if err := p.ReadField10(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 11: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField11(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 12: - if fieldTypeId == thrift.BOOL { - if err := p.ReadField12(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *MultiGetRequest) ReadField1(iprot thrift.TProtocol) error { - p.HashKey = &base.Blob{} - if err := p.HashKey.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.HashKey), err) - } - return nil -} - -func (p *MultiGetRequest) ReadField2(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*base.Blob, 0, size) - p.SorkKeys = tSlice - for i := 0; i < size; i++ { - _elem2 := &base.Blob{} - if 
err := _elem2.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem2), err) - } - p.SorkKeys = append(p.SorkKeys, _elem2) - } - if err := iprot.ReadListEnd(); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *MultiGetRequest) ReadField3(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 3: ", err) - } else { - p.MaxKvCount = v - } - return nil -} - -func (p *MultiGetRequest) ReadField4(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 4: ", err) - } else { - p.MaxKvSize = v - } - return nil -} - -func (p *MultiGetRequest) ReadField5(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { - return thrift.PrependError("error reading field 5: ", err) - } else { - p.NoValue = v - } - return nil -} - -func (p *MultiGetRequest) ReadField6(iprot thrift.TProtocol) error { - p.StartSortkey = &base.Blob{} - if err := p.StartSortkey.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.StartSortkey), err) - } - return nil -} - -func (p *MultiGetRequest) ReadField7(iprot thrift.TProtocol) error { - p.StopSortkey = &base.Blob{} - if err := p.StopSortkey.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.StopSortkey), err) - } - return nil -} - -func (p *MultiGetRequest) ReadField8(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { - return thrift.PrependError("error reading field 8: ", err) - } else { - p.StartInclusive = v - } - return nil -} - -func (p *MultiGetRequest) ReadField9(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { - return thrift.PrependError("error reading field 9: ", err) - } else { - p.StopInclusive = v - } - return nil -} - -func (p *MultiGetRequest) ReadField10(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 10: ", err) - } else { - temp := FilterType(v) - p.SortKeyFilterType = temp - } - return nil -} - -func (p *MultiGetRequest) ReadField11(iprot thrift.TProtocol) error { - p.SortKeyFilterPattern = &base.Blob{} - if err := p.SortKeyFilterPattern.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.SortKeyFilterPattern), err) - } - return nil -} - -func (p *MultiGetRequest) ReadField12(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { - return thrift.PrependError("error reading field 12: ", err) - } else { - p.Reverse = v - } - return nil -} - -func (p *MultiGetRequest) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("multi_get_request"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - if err := p.writeField3(oprot); err != nil { - return err - } - if err := p.writeField4(oprot); err != nil { - return err - } - if err := p.writeField5(oprot); err != nil { - return err - } - if err := p.writeField6(oprot); err != nil { - return err - } - if err := p.writeField7(oprot); err != nil { - return err - } - if err := p.writeField8(oprot); err != nil { - return err - } - if err := p.writeField9(oprot); 
err != nil { - return err - } - if err := p.writeField10(oprot); err != nil { - return err - } - if err := p.writeField11(oprot); err != nil { - return err - } - if err := p.writeField12(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *MultiGetRequest) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("hash_key", thrift.STRUCT, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:hash_key: ", p), err) - } - if err := p.HashKey.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.HashKey), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:hash_key: ", p), err) - } - return err -} - -func (p *MultiGetRequest) writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("sork_keys", thrift.LIST, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:sork_keys: ", p), err) - } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.SorkKeys)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.SorkKeys { - if err := v.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:sork_keys: ", p), err) - } - return err -} - -func (p *MultiGetRequest) writeField3(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("max_kv_count", thrift.I32, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:max_kv_count: ", p), err) - } - if err := oprot.WriteI32(int32(p.MaxKvCount)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.max_kv_count (3) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:max_kv_count: ", p), err) - } - return err -} - -func (p *MultiGetRequest) writeField4(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("max_kv_size", thrift.I32, 4); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:max_kv_size: ", p), err) - } - if err := oprot.WriteI32(int32(p.MaxKvSize)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.max_kv_size (4) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 4:max_kv_size: ", p), err) - } - return err -} - -func (p *MultiGetRequest) writeField5(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("no_value", thrift.BOOL, 5); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:no_value: ", p), err) - } - if err := oprot.WriteBool(bool(p.NoValue)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.no_value (5) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 5:no_value: ", 
p), err) - } - return err -} - -func (p *MultiGetRequest) writeField6(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("start_sortkey", thrift.STRUCT, 6); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:start_sortkey: ", p), err) - } - if err := p.StartSortkey.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.StartSortkey), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 6:start_sortkey: ", p), err) - } - return err -} - -func (p *MultiGetRequest) writeField7(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("stop_sortkey", thrift.STRUCT, 7); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:stop_sortkey: ", p), err) - } - if err := p.StopSortkey.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.StopSortkey), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 7:stop_sortkey: ", p), err) - } - return err -} - -func (p *MultiGetRequest) writeField8(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("start_inclusive", thrift.BOOL, 8); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:start_inclusive: ", p), err) - } - if err := oprot.WriteBool(bool(p.StartInclusive)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.start_inclusive (8) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 8:start_inclusive: ", p), err) - } - return err -} - -func (p *MultiGetRequest) writeField9(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("stop_inclusive", thrift.BOOL, 9); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 9:stop_inclusive: ", p), err) - } - if err := oprot.WriteBool(bool(p.StopInclusive)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.stop_inclusive (9) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 9:stop_inclusive: ", p), err) - } - return err -} - -func (p *MultiGetRequest) writeField10(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("sort_key_filter_type", thrift.I32, 10); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 10:sort_key_filter_type: ", p), err) - } - if err := oprot.WriteI32(int32(p.SortKeyFilterType)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.sort_key_filter_type (10) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 10:sort_key_filter_type: ", p), err) - } - return err -} - -func (p *MultiGetRequest) writeField11(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("sort_key_filter_pattern", thrift.STRUCT, 11); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 11:sort_key_filter_pattern: ", p), err) - } - if err := p.SortKeyFilterPattern.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.SortKeyFilterPattern), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field 
end error 11:sort_key_filter_pattern: ", p), err) - } - return err -} - -func (p *MultiGetRequest) writeField12(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("reverse", thrift.BOOL, 12); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 12:reverse: ", p), err) - } - if err := oprot.WriteBool(bool(p.Reverse)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.reverse (12) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 12:reverse: ", p), err) - } - return err -} - -func (p *MultiGetRequest) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("MultiGetRequest(%+v)", *p) -} - -// Attributes: -// - Error -// - Kvs -// - AppID -// - PartitionIndex -// - Server -type MultiGetResponse struct { - Error int32 `thrift:"error,1" db:"error" json:"error"` - Kvs []*KeyValue `thrift:"kvs,2" db:"kvs" json:"kvs"` - AppID int32 `thrift:"app_id,3" db:"app_id" json:"app_id"` - PartitionIndex int32 `thrift:"partition_index,4" db:"partition_index" json:"partition_index"` - // unused field # 5 - Server string `thrift:"server,6" db:"server" json:"server"` -} - -func NewMultiGetResponse() *MultiGetResponse { - return &MultiGetResponse{} -} - -func (p *MultiGetResponse) GetError() int32 { - return p.Error -} - -func (p *MultiGetResponse) GetKvs() []*KeyValue { - return p.Kvs -} - -func (p *MultiGetResponse) GetAppID() int32 { - return p.AppID -} - -func (p *MultiGetResponse) GetPartitionIndex() int32 { - return p.PartitionIndex -} - -func (p *MultiGetResponse) GetServer() string { - return p.Server -} -func (p *MultiGetResponse) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.I32 { - if err := p.ReadField1(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 2: - if fieldTypeId == thrift.LIST { - if err := p.ReadField2(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 3: - if fieldTypeId == thrift.I32 { - if err := p.ReadField3(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 4: - if fieldTypeId == thrift.I32 { - if err := p.ReadField4(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 6: - if fieldTypeId == thrift.STRING { - if err := p.ReadField6(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *MultiGetResponse) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - 
p.Error = v - } - return nil -} - -func (p *MultiGetResponse) ReadField2(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*KeyValue, 0, size) - p.Kvs = tSlice - for i := 0; i < size; i++ { - _elem3 := &KeyValue{} - if err := _elem3.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem3), err) - } - p.Kvs = append(p.Kvs, _elem3) - } - if err := iprot.ReadListEnd(); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *MultiGetResponse) ReadField3(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 3: ", err) - } else { - p.AppID = v - } - return nil -} - -func (p *MultiGetResponse) ReadField4(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 4: ", err) - } else { - p.PartitionIndex = v - } - return nil -} - -func (p *MultiGetResponse) ReadField6(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 6: ", err) - } else { - p.Server = v - } - return nil -} - -func (p *MultiGetResponse) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("multi_get_response"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - if err := p.writeField3(oprot); err != nil { - return err - } - if err := p.writeField4(oprot); err != nil { - return err - } - if err := p.writeField6(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *MultiGetResponse) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("error", thrift.I32, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:error: ", p), err) - } - if err := oprot.WriteI32(int32(p.Error)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.error (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:error: ", p), err) - } - return err -} - -func (p *MultiGetResponse) writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("kvs", thrift.LIST, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:kvs: ", p), err) - } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Kvs)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.Kvs { - if err := v.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:kvs: ", p), err) - } - return err -} - -func (p *MultiGetResponse) writeField3(oprot 
thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("app_id", thrift.I32, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:app_id: ", p), err) - } - if err := oprot.WriteI32(int32(p.AppID)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.app_id (3) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:app_id: ", p), err) - } - return err -} - -func (p *MultiGetResponse) writeField4(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("partition_index", thrift.I32, 4); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:partition_index: ", p), err) - } - if err := oprot.WriteI32(int32(p.PartitionIndex)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.partition_index (4) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 4:partition_index: ", p), err) - } - return err -} - -func (p *MultiGetResponse) writeField6(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("server", thrift.STRING, 6); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:server: ", p), err) - } - if err := oprot.WriteString(string(p.Server)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.server (6) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 6:server: ", p), err) - } - return err -} - -func (p *MultiGetResponse) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("MultiGetResponse(%+v)", *p) -} - -// Attributes: -// - Key -// - Increment -type IncrRequest struct { - Key *base.Blob `thrift:"key,1" db:"key" json:"key"` - Increment int64 `thrift:"increment,2" db:"increment" json:"increment"` -} - -func NewIncrRequest() *IncrRequest { - return &IncrRequest{} -} - -var IncrRequest_Key_DEFAULT *base.Blob - -func (p *IncrRequest) GetKey() *base.Blob { - if !p.IsSetKey() { - return IncrRequest_Key_DEFAULT - } - return p.Key -} - -func (p *IncrRequest) GetIncrement() int64 { - return p.Increment -} -func (p *IncrRequest) IsSetKey() bool { - return p.Key != nil -} - -func (p *IncrRequest) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField1(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 2: - if fieldTypeId == thrift.I64 { - if err := p.ReadField2(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *IncrRequest) ReadField1(iprot thrift.TProtocol) error { - p.Key = 
&base.Blob{} - if err := p.Key.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Key), err) - } - return nil -} - -func (p *IncrRequest) ReadField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return thrift.PrependError("error reading field 2: ", err) - } else { - p.Increment = v - } - return nil -} - -func (p *IncrRequest) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("incr_request"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *IncrRequest) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("key", thrift.STRUCT, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:key: ", p), err) - } - if err := p.Key.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Key), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:key: ", p), err) - } - return err -} - -func (p *IncrRequest) writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("increment", thrift.I64, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:increment: ", p), err) - } - if err := oprot.WriteI64(int64(p.Increment)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.increment (2) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:increment: ", p), err) - } - return err -} - -func (p *IncrRequest) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("IncrRequest(%+v)", *p) -} - -// Attributes: -// - Error -// - NewValue_ -// - AppID -// - PartitionIndex -// - Decree -// - Server -type IncrResponse struct { - Error int32 `thrift:"error,1" db:"error" json:"error"` - NewValue_ int64 `thrift:"new_value,2" db:"new_value" json:"new_value"` - AppID int32 `thrift:"app_id,3" db:"app_id" json:"app_id"` - PartitionIndex int32 `thrift:"partition_index,4" db:"partition_index" json:"partition_index"` - Decree int64 `thrift:"decree,5" db:"decree" json:"decree"` - Server string `thrift:"server,6" db:"server" json:"server"` -} - -func NewIncrResponse() *IncrResponse { - return &IncrResponse{} -} - -func (p *IncrResponse) GetError() int32 { - return p.Error -} - -func (p *IncrResponse) GetNewValue_() int64 { - return p.NewValue_ -} - -func (p *IncrResponse) GetAppID() int32 { - return p.AppID -} - -func (p *IncrResponse) GetPartitionIndex() int32 { - return p.PartitionIndex -} - -func (p *IncrResponse) GetDecree() int64 { - return p.Decree -} - -func (p *IncrResponse) GetServer() string { - return p.Server -} -func (p *IncrResponse) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return 
thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.I32 { - if err := p.ReadField1(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 2: - if fieldTypeId == thrift.I64 { - if err := p.ReadField2(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 3: - if fieldTypeId == thrift.I32 { - if err := p.ReadField3(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 4: - if fieldTypeId == thrift.I32 { - if err := p.ReadField4(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 5: - if fieldTypeId == thrift.I64 { - if err := p.ReadField5(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 6: - if fieldTypeId == thrift.STRING { - if err := p.ReadField6(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *IncrResponse) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.Error = v - } - return nil -} - -func (p *IncrResponse) ReadField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return thrift.PrependError("error reading field 2: ", err) - } else { - p.NewValue_ = v - } - return nil -} - -func (p *IncrResponse) ReadField3(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 3: ", err) - } else { - p.AppID = v - } - return nil -} - -func (p *IncrResponse) ReadField4(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 4: ", err) - } else { - p.PartitionIndex = v - } - return nil -} - -func (p *IncrResponse) ReadField5(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return thrift.PrependError("error reading field 5: ", err) - } else { - p.Decree = v - } - return nil -} - -func (p *IncrResponse) ReadField6(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 6: ", err) - } else { - p.Server = v - } - return nil -} - -func (p *IncrResponse) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("incr_response"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - if err := p.writeField3(oprot); err != nil { - return err - } - if err := p.writeField4(oprot); err != nil { - return err - } - if err := p.writeField5(oprot); err != nil { - return err - } - if err := p.writeField6(oprot); err != nil { - return err - } - } - if err := 
oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *IncrResponse) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("error", thrift.I32, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:error: ", p), err) - } - if err := oprot.WriteI32(int32(p.Error)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.error (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:error: ", p), err) - } - return err -} - -func (p *IncrResponse) writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("new_value", thrift.I64, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:new_value: ", p), err) - } - if err := oprot.WriteI64(int64(p.NewValue_)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.new_value (2) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:new_value: ", p), err) - } - return err -} - -func (p *IncrResponse) writeField3(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("app_id", thrift.I32, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:app_id: ", p), err) - } - if err := oprot.WriteI32(int32(p.AppID)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.app_id (3) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:app_id: ", p), err) - } - return err -} - -func (p *IncrResponse) writeField4(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("partition_index", thrift.I32, 4); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:partition_index: ", p), err) - } - if err := oprot.WriteI32(int32(p.PartitionIndex)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.partition_index (4) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 4:partition_index: ", p), err) - } - return err -} - -func (p *IncrResponse) writeField5(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("decree", thrift.I64, 5); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:decree: ", p), err) - } - if err := oprot.WriteI64(int64(p.Decree)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.decree (5) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 5:decree: ", p), err) - } - return err -} - -func (p *IncrResponse) writeField6(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("server", thrift.STRING, 6); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:server: ", p), err) - } - if err := oprot.WriteString(string(p.Server)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.server (6) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 
6:server: ", p), err) - } - return err -} - -func (p *IncrResponse) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("IncrResponse(%+v)", *p) -} - -// Attributes: -// - HashKey -// - CheckSortKey -// - CheckType -// - CheckOperand -// - SetDiffSortKey -// - SetSortKey -// - SetValue -// - SetExpireTsSeconds -// - ReturnCheckValue -type CheckAndSetRequest struct { - HashKey *base.Blob `thrift:"hash_key,1" db:"hash_key" json:"hash_key"` - CheckSortKey *base.Blob `thrift:"check_sort_key,2" db:"check_sort_key" json:"check_sort_key"` - CheckType CasCheckType `thrift:"check_type,3" db:"check_type" json:"check_type"` - CheckOperand *base.Blob `thrift:"check_operand,4" db:"check_operand" json:"check_operand"` - SetDiffSortKey bool `thrift:"set_diff_sort_key,5" db:"set_diff_sort_key" json:"set_diff_sort_key"` - SetSortKey *base.Blob `thrift:"set_sort_key,6" db:"set_sort_key" json:"set_sort_key"` - SetValue *base.Blob `thrift:"set_value,7" db:"set_value" json:"set_value"` - SetExpireTsSeconds int32 `thrift:"set_expire_ts_seconds,8" db:"set_expire_ts_seconds" json:"set_expire_ts_seconds"` - ReturnCheckValue bool `thrift:"return_check_value,9" db:"return_check_value" json:"return_check_value"` -} - -func NewCheckAndSetRequest() *CheckAndSetRequest { - return &CheckAndSetRequest{} -} - -var CheckAndSetRequest_HashKey_DEFAULT *base.Blob - -func (p *CheckAndSetRequest) GetHashKey() *base.Blob { - if !p.IsSetHashKey() { - return CheckAndSetRequest_HashKey_DEFAULT - } - return p.HashKey -} - -var CheckAndSetRequest_CheckSortKey_DEFAULT *base.Blob - -func (p *CheckAndSetRequest) GetCheckSortKey() *base.Blob { - if !p.IsSetCheckSortKey() { - return CheckAndSetRequest_CheckSortKey_DEFAULT - } - return p.CheckSortKey -} - -func (p *CheckAndSetRequest) GetCheckType() CasCheckType { - return p.CheckType -} - -var CheckAndSetRequest_CheckOperand_DEFAULT *base.Blob - -func (p *CheckAndSetRequest) GetCheckOperand() *base.Blob { - if !p.IsSetCheckOperand() { - return CheckAndSetRequest_CheckOperand_DEFAULT - } - return p.CheckOperand -} - -func (p *CheckAndSetRequest) GetSetDiffSortKey() bool { - return p.SetDiffSortKey -} - -var CheckAndSetRequest_SetSortKey_DEFAULT *base.Blob - -func (p *CheckAndSetRequest) GetSetSortKey() *base.Blob { - if !p.IsSetSetSortKey() { - return CheckAndSetRequest_SetSortKey_DEFAULT - } - return p.SetSortKey -} - -var CheckAndSetRequest_SetValue_DEFAULT *base.Blob - -func (p *CheckAndSetRequest) GetSetValue() *base.Blob { - if !p.IsSetSetValue() { - return CheckAndSetRequest_SetValue_DEFAULT - } - return p.SetValue -} - -func (p *CheckAndSetRequest) GetSetExpireTsSeconds() int32 { - return p.SetExpireTsSeconds -} - -func (p *CheckAndSetRequest) GetReturnCheckValue() bool { - return p.ReturnCheckValue -} -func (p *CheckAndSetRequest) IsSetHashKey() bool { - return p.HashKey != nil -} - -func (p *CheckAndSetRequest) IsSetCheckSortKey() bool { - return p.CheckSortKey != nil -} - -func (p *CheckAndSetRequest) IsSetCheckOperand() bool { - return p.CheckOperand != nil -} - -func (p *CheckAndSetRequest) IsSetSetSortKey() bool { - return p.SetSortKey != nil -} - -func (p *CheckAndSetRequest) IsSetSetValue() bool { - return p.SetValue != nil -} - -func (p *CheckAndSetRequest) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d 
read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField1(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 2: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField2(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 3: - if fieldTypeId == thrift.I32 { - if err := p.ReadField3(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 4: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField4(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 5: - if fieldTypeId == thrift.BOOL { - if err := p.ReadField5(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 6: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField6(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 7: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField7(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 8: - if fieldTypeId == thrift.I32 { - if err := p.ReadField8(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 9: - if fieldTypeId == thrift.BOOL { - if err := p.ReadField9(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *CheckAndSetRequest) ReadField1(iprot thrift.TProtocol) error { - p.HashKey = &base.Blob{} - if err := p.HashKey.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.HashKey), err) - } - return nil -} - -func (p *CheckAndSetRequest) ReadField2(iprot thrift.TProtocol) error { - p.CheckSortKey = &base.Blob{} - if err := p.CheckSortKey.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.CheckSortKey), err) - } - return nil -} - -func (p *CheckAndSetRequest) ReadField3(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 3: ", err) - } else { - temp := CasCheckType(v) - p.CheckType = temp - } - return nil -} - -func (p *CheckAndSetRequest) ReadField4(iprot thrift.TProtocol) error { - p.CheckOperand = &base.Blob{} - if err := p.CheckOperand.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.CheckOperand), err) - } - return nil -} - -func (p *CheckAndSetRequest) ReadField5(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { - return thrift.PrependError("error reading field 5: ", err) - } else { - p.SetDiffSortKey = v - } - return nil -} - -func (p *CheckAndSetRequest) ReadField6(iprot thrift.TProtocol) error { - p.SetSortKey = &base.Blob{} - if err := p.SetSortKey.Read(iprot); err != 
nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.SetSortKey), err) - } - return nil -} - -func (p *CheckAndSetRequest) ReadField7(iprot thrift.TProtocol) error { - p.SetValue = &base.Blob{} - if err := p.SetValue.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.SetValue), err) - } - return nil -} - -func (p *CheckAndSetRequest) ReadField8(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 8: ", err) - } else { - p.SetExpireTsSeconds = v - } - return nil -} - -func (p *CheckAndSetRequest) ReadField9(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { - return thrift.PrependError("error reading field 9: ", err) - } else { - p.ReturnCheckValue = v - } - return nil -} - -func (p *CheckAndSetRequest) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("check_and_set_request"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - if err := p.writeField3(oprot); err != nil { - return err - } - if err := p.writeField4(oprot); err != nil { - return err - } - if err := p.writeField5(oprot); err != nil { - return err - } - if err := p.writeField6(oprot); err != nil { - return err - } - if err := p.writeField7(oprot); err != nil { - return err - } - if err := p.writeField8(oprot); err != nil { - return err - } - if err := p.writeField9(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *CheckAndSetRequest) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("hash_key", thrift.STRUCT, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:hash_key: ", p), err) - } - if err := p.HashKey.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.HashKey), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:hash_key: ", p), err) - } - return err -} - -func (p *CheckAndSetRequest) writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("check_sort_key", thrift.STRUCT, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:check_sort_key: ", p), err) - } - if err := p.CheckSortKey.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.CheckSortKey), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:check_sort_key: ", p), err) - } - return err -} - -func (p *CheckAndSetRequest) writeField3(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("check_type", thrift.I32, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:check_type: ", p), err) - } - if err := oprot.WriteI32(int32(p.CheckType)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.check_type (3) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return 
thrift.PrependError(fmt.Sprintf("%T write field end error 3:check_type: ", p), err) - } - return err -} - -func (p *CheckAndSetRequest) writeField4(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("check_operand", thrift.STRUCT, 4); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:check_operand: ", p), err) - } - if err := p.CheckOperand.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.CheckOperand), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 4:check_operand: ", p), err) - } - return err -} - -func (p *CheckAndSetRequest) writeField5(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("set_diff_sort_key", thrift.BOOL, 5); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:set_diff_sort_key: ", p), err) - } - if err := oprot.WriteBool(bool(p.SetDiffSortKey)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.set_diff_sort_key (5) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 5:set_diff_sort_key: ", p), err) - } - return err -} - -func (p *CheckAndSetRequest) writeField6(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("set_sort_key", thrift.STRUCT, 6); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:set_sort_key: ", p), err) - } - if err := p.SetSortKey.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.SetSortKey), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 6:set_sort_key: ", p), err) - } - return err -} - -func (p *CheckAndSetRequest) writeField7(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("set_value", thrift.STRUCT, 7); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:set_value: ", p), err) - } - if err := p.SetValue.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.SetValue), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 7:set_value: ", p), err) - } - return err -} - -func (p *CheckAndSetRequest) writeField8(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("set_expire_ts_seconds", thrift.I32, 8); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:set_expire_ts_seconds: ", p), err) - } - if err := oprot.WriteI32(int32(p.SetExpireTsSeconds)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.set_expire_ts_seconds (8) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 8:set_expire_ts_seconds: ", p), err) - } - return err -} - -func (p *CheckAndSetRequest) writeField9(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("return_check_value", thrift.BOOL, 9); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 9:return_check_value: ", p), err) - } - if err := oprot.WriteBool(bool(p.ReturnCheckValue)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.return_check_value (9) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - 
return thrift.PrependError(fmt.Sprintf("%T write field end error 9:return_check_value: ", p), err) - } - return err -} - -func (p *CheckAndSetRequest) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("CheckAndSetRequest(%+v)", *p) -} - -// Attributes: -// - Error -// - CheckValueReturned -// - CheckValueExist -// - CheckValue -// - AppID -// - PartitionIndex -// - Decree -// - Server -type CheckAndSetResponse struct { - Error int32 `thrift:"error,1" db:"error" json:"error"` - CheckValueReturned bool `thrift:"check_value_returned,2" db:"check_value_returned" json:"check_value_returned"` - CheckValueExist bool `thrift:"check_value_exist,3" db:"check_value_exist" json:"check_value_exist"` - CheckValue *base.Blob `thrift:"check_value,4" db:"check_value" json:"check_value"` - AppID int32 `thrift:"app_id,5" db:"app_id" json:"app_id"` - PartitionIndex int32 `thrift:"partition_index,6" db:"partition_index" json:"partition_index"` - Decree int64 `thrift:"decree,7" db:"decree" json:"decree"` - Server string `thrift:"server,8" db:"server" json:"server"` -} - -func NewCheckAndSetResponse() *CheckAndSetResponse { - return &CheckAndSetResponse{} -} - -func (p *CheckAndSetResponse) GetError() int32 { - return p.Error -} - -func (p *CheckAndSetResponse) GetCheckValueReturned() bool { - return p.CheckValueReturned -} - -func (p *CheckAndSetResponse) GetCheckValueExist() bool { - return p.CheckValueExist -} - -var CheckAndSetResponse_CheckValue_DEFAULT *base.Blob - -func (p *CheckAndSetResponse) GetCheckValue() *base.Blob { - if !p.IsSetCheckValue() { - return CheckAndSetResponse_CheckValue_DEFAULT - } - return p.CheckValue -} - -func (p *CheckAndSetResponse) GetAppID() int32 { - return p.AppID -} - -func (p *CheckAndSetResponse) GetPartitionIndex() int32 { - return p.PartitionIndex -} - -func (p *CheckAndSetResponse) GetDecree() int64 { - return p.Decree -} - -func (p *CheckAndSetResponse) GetServer() string { - return p.Server -} -func (p *CheckAndSetResponse) IsSetCheckValue() bool { - return p.CheckValue != nil -} - -func (p *CheckAndSetResponse) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.I32 { - if err := p.ReadField1(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 2: - if fieldTypeId == thrift.BOOL { - if err := p.ReadField2(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 3: - if fieldTypeId == thrift.BOOL { - if err := p.ReadField3(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 4: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField4(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 5: - if fieldTypeId == thrift.I32 { - if err := p.ReadField5(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 6: - if fieldTypeId == thrift.I32 { - if err := p.ReadField6(iprot); err != nil { - return err - } - } else 
{ - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 7: - if fieldTypeId == thrift.I64 { - if err := p.ReadField7(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 8: - if fieldTypeId == thrift.STRING { - if err := p.ReadField8(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *CheckAndSetResponse) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.Error = v - } - return nil -} - -func (p *CheckAndSetResponse) ReadField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { - return thrift.PrependError("error reading field 2: ", err) - } else { - p.CheckValueReturned = v - } - return nil -} - -func (p *CheckAndSetResponse) ReadField3(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { - return thrift.PrependError("error reading field 3: ", err) - } else { - p.CheckValueExist = v - } - return nil -} - -func (p *CheckAndSetResponse) ReadField4(iprot thrift.TProtocol) error { - p.CheckValue = &base.Blob{} - if err := p.CheckValue.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.CheckValue), err) - } - return nil -} - -func (p *CheckAndSetResponse) ReadField5(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 5: ", err) - } else { - p.AppID = v - } - return nil -} - -func (p *CheckAndSetResponse) ReadField6(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 6: ", err) - } else { - p.PartitionIndex = v - } - return nil -} - -func (p *CheckAndSetResponse) ReadField7(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return thrift.PrependError("error reading field 7: ", err) - } else { - p.Decree = v - } - return nil -} - -func (p *CheckAndSetResponse) ReadField8(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 8: ", err) - } else { - p.Server = v - } - return nil -} - -func (p *CheckAndSetResponse) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("check_and_set_response"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - if err := p.writeField3(oprot); err != nil { - return err - } - if err := p.writeField4(oprot); err != nil { - return err - } - if err := p.writeField5(oprot); err != nil { - return err - } - if err := p.writeField6(oprot); err != nil { - return err - } - if err := p.writeField7(oprot); err != nil { - return err - } - if err := p.writeField8(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := 
oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *CheckAndSetResponse) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("error", thrift.I32, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:error: ", p), err) - } - if err := oprot.WriteI32(int32(p.Error)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.error (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:error: ", p), err) - } - return err -} - -func (p *CheckAndSetResponse) writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("check_value_returned", thrift.BOOL, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:check_value_returned: ", p), err) - } - if err := oprot.WriteBool(bool(p.CheckValueReturned)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.check_value_returned (2) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:check_value_returned: ", p), err) - } - return err -} - -func (p *CheckAndSetResponse) writeField3(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("check_value_exist", thrift.BOOL, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:check_value_exist: ", p), err) - } - if err := oprot.WriteBool(bool(p.CheckValueExist)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.check_value_exist (3) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:check_value_exist: ", p), err) - } - return err -} - -func (p *CheckAndSetResponse) writeField4(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("check_value", thrift.STRUCT, 4); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:check_value: ", p), err) - } - if err := p.CheckValue.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.CheckValue), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 4:check_value: ", p), err) - } - return err -} - -func (p *CheckAndSetResponse) writeField5(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("app_id", thrift.I32, 5); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:app_id: ", p), err) - } - if err := oprot.WriteI32(int32(p.AppID)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.app_id (5) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 5:app_id: ", p), err) - } - return err -} - -func (p *CheckAndSetResponse) writeField6(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("partition_index", thrift.I32, 6); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:partition_index: ", p), err) - } - if err := oprot.WriteI32(int32(p.PartitionIndex)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.partition_index (6) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return 
thrift.PrependError(fmt.Sprintf("%T write field end error 6:partition_index: ", p), err) - } - return err -} - -func (p *CheckAndSetResponse) writeField7(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("decree", thrift.I64, 7); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:decree: ", p), err) - } - if err := oprot.WriteI64(int64(p.Decree)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.decree (7) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 7:decree: ", p), err) - } - return err -} - -func (p *CheckAndSetResponse) writeField8(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("server", thrift.STRING, 8); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:server: ", p), err) - } - if err := oprot.WriteString(string(p.Server)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.server (8) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 8:server: ", p), err) - } - return err -} - -func (p *CheckAndSetResponse) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("CheckAndSetResponse(%+v)", *p) -} - -// Attributes: -// - StartKey -// - StopKey -// - StartInclusive -// - StopInclusive -// - BatchSize -// - NoValue -// - HashKeyFilterType -// - HashKeyFilterPattern -// - SortKeyFilterType -// - SortKeyFilterPattern -type GetScannerRequest struct { - StartKey *base.Blob `thrift:"start_key,1" db:"start_key" json:"start_key"` - StopKey *base.Blob `thrift:"stop_key,2" db:"stop_key" json:"stop_key"` - StartInclusive bool `thrift:"start_inclusive,3" db:"start_inclusive" json:"start_inclusive"` - StopInclusive bool `thrift:"stop_inclusive,4" db:"stop_inclusive" json:"stop_inclusive"` - BatchSize int32 `thrift:"batch_size,5" db:"batch_size" json:"batch_size"` - NoValue bool `thrift:"no_value,6" db:"no_value" json:"no_value"` - HashKeyFilterType FilterType `thrift:"hash_key_filter_type,7" db:"hash_key_filter_type" json:"hash_key_filter_type"` - HashKeyFilterPattern *base.Blob `thrift:"hash_key_filter_pattern,8" db:"hash_key_filter_pattern" json:"hash_key_filter_pattern"` - SortKeyFilterType FilterType `thrift:"sort_key_filter_type,9" db:"sort_key_filter_type" json:"sort_key_filter_type"` - SortKeyFilterPattern *base.Blob `thrift:"sort_key_filter_pattern,10" db:"sort_key_filter_pattern" json:"sort_key_filter_pattern"` -} - -func NewGetScannerRequest() *GetScannerRequest { - return &GetScannerRequest{} -} - -var GetScannerRequest_StartKey_DEFAULT *base.Blob - -func (p *GetScannerRequest) GetStartKey() *base.Blob { - if !p.IsSetStartKey() { - return GetScannerRequest_StartKey_DEFAULT - } - return p.StartKey -} - -var GetScannerRequest_StopKey_DEFAULT *base.Blob - -func (p *GetScannerRequest) GetStopKey() *base.Blob { - if !p.IsSetStopKey() { - return GetScannerRequest_StopKey_DEFAULT - } - return p.StopKey -} - -func (p *GetScannerRequest) GetStartInclusive() bool { - return p.StartInclusive -} - -func (p *GetScannerRequest) GetStopInclusive() bool { - return p.StopInclusive -} - -func (p *GetScannerRequest) GetBatchSize() int32 { - return p.BatchSize -} - -func (p *GetScannerRequest) GetNoValue() bool { - return p.NoValue -} - -func (p *GetScannerRequest) GetHashKeyFilterType() FilterType { - return p.HashKeyFilterType -} - -var 
GetScannerRequest_HashKeyFilterPattern_DEFAULT *base.Blob - -func (p *GetScannerRequest) GetHashKeyFilterPattern() *base.Blob { - if !p.IsSetHashKeyFilterPattern() { - return GetScannerRequest_HashKeyFilterPattern_DEFAULT - } - return p.HashKeyFilterPattern -} - -func (p *GetScannerRequest) GetSortKeyFilterType() FilterType { - return p.SortKeyFilterType -} - -var GetScannerRequest_SortKeyFilterPattern_DEFAULT *base.Blob - -func (p *GetScannerRequest) GetSortKeyFilterPattern() *base.Blob { - if !p.IsSetSortKeyFilterPattern() { - return GetScannerRequest_SortKeyFilterPattern_DEFAULT - } - return p.SortKeyFilterPattern -} -func (p *GetScannerRequest) IsSetStartKey() bool { - return p.StartKey != nil -} - -func (p *GetScannerRequest) IsSetStopKey() bool { - return p.StopKey != nil -} - -func (p *GetScannerRequest) IsSetHashKeyFilterPattern() bool { - return p.HashKeyFilterPattern != nil -} - -func (p *GetScannerRequest) IsSetSortKeyFilterPattern() bool { - return p.SortKeyFilterPattern != nil -} - -func (p *GetScannerRequest) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField1(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 2: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField2(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 3: - if fieldTypeId == thrift.BOOL { - if err := p.ReadField3(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 4: - if fieldTypeId == thrift.BOOL { - if err := p.ReadField4(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 5: - if fieldTypeId == thrift.I32 { - if err := p.ReadField5(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 6: - if fieldTypeId == thrift.BOOL { - if err := p.ReadField6(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 7: - if fieldTypeId == thrift.I32 { - if err := p.ReadField7(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 8: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField8(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 9: - if fieldTypeId == thrift.I32 { - if err := p.ReadField9(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 10: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField10(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return 
thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *GetScannerRequest) ReadField1(iprot thrift.TProtocol) error { - p.StartKey = &base.Blob{} - if err := p.StartKey.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.StartKey), err) - } - return nil -} - -func (p *GetScannerRequest) ReadField2(iprot thrift.TProtocol) error { - p.StopKey = &base.Blob{} - if err := p.StopKey.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.StopKey), err) - } - return nil -} - -func (p *GetScannerRequest) ReadField3(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { - return thrift.PrependError("error reading field 3: ", err) - } else { - p.StartInclusive = v - } - return nil -} - -func (p *GetScannerRequest) ReadField4(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { - return thrift.PrependError("error reading field 4: ", err) - } else { - p.StopInclusive = v - } - return nil -} - -func (p *GetScannerRequest) ReadField5(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 5: ", err) - } else { - p.BatchSize = v - } - return nil -} - -func (p *GetScannerRequest) ReadField6(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { - return thrift.PrependError("error reading field 6: ", err) - } else { - p.NoValue = v - } - return nil -} - -func (p *GetScannerRequest) ReadField7(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 7: ", err) - } else { - temp := FilterType(v) - p.HashKeyFilterType = temp - } - return nil -} - -func (p *GetScannerRequest) ReadField8(iprot thrift.TProtocol) error { - p.HashKeyFilterPattern = &base.Blob{} - if err := p.HashKeyFilterPattern.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.HashKeyFilterPattern), err) - } - return nil -} - -func (p *GetScannerRequest) ReadField9(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 9: ", err) - } else { - temp := FilterType(v) - p.SortKeyFilterType = temp - } - return nil -} - -func (p *GetScannerRequest) ReadField10(iprot thrift.TProtocol) error { - p.SortKeyFilterPattern = &base.Blob{} - if err := p.SortKeyFilterPattern.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.SortKeyFilterPattern), err) - } - return nil -} - -func (p *GetScannerRequest) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("get_scanner_request"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - if err := p.writeField3(oprot); err != nil { - return err - } - if err := p.writeField4(oprot); err != nil { - return err - } - if err := p.writeField5(oprot); err != nil { - return err - } - if err := p.writeField6(oprot); err != nil { - return err - } - if err := p.writeField7(oprot); err != nil { - return err - } - if err := p.writeField8(oprot); err != nil { - return err - } - if err := p.writeField9(oprot); err != nil { - return err - } - if err := p.writeField10(oprot); err != nil { - return err - } - } - if err := 
oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *GetScannerRequest) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("start_key", thrift.STRUCT, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:start_key: ", p), err) - } - if err := p.StartKey.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.StartKey), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:start_key: ", p), err) - } - return err -} - -func (p *GetScannerRequest) writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("stop_key", thrift.STRUCT, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:stop_key: ", p), err) - } - if err := p.StopKey.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.StopKey), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:stop_key: ", p), err) - } - return err -} - -func (p *GetScannerRequest) writeField3(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("start_inclusive", thrift.BOOL, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:start_inclusive: ", p), err) - } - if err := oprot.WriteBool(bool(p.StartInclusive)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.start_inclusive (3) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:start_inclusive: ", p), err) - } - return err -} - -func (p *GetScannerRequest) writeField4(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("stop_inclusive", thrift.BOOL, 4); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:stop_inclusive: ", p), err) - } - if err := oprot.WriteBool(bool(p.StopInclusive)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.stop_inclusive (4) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 4:stop_inclusive: ", p), err) - } - return err -} - -func (p *GetScannerRequest) writeField5(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("batch_size", thrift.I32, 5); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:batch_size: ", p), err) - } - if err := oprot.WriteI32(int32(p.BatchSize)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.batch_size (5) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 5:batch_size: ", p), err) - } - return err -} - -func (p *GetScannerRequest) writeField6(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("no_value", thrift.BOOL, 6); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:no_value: ", p), err) - } - if err := oprot.WriteBool(bool(p.NoValue)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.no_value (6) field write error: ", p), err) - } - if err := 
oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 6:no_value: ", p), err) - } - return err -} - -func (p *GetScannerRequest) writeField7(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("hash_key_filter_type", thrift.I32, 7); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:hash_key_filter_type: ", p), err) - } - if err := oprot.WriteI32(int32(p.HashKeyFilterType)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.hash_key_filter_type (7) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 7:hash_key_filter_type: ", p), err) - } - return err -} - -func (p *GetScannerRequest) writeField8(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("hash_key_filter_pattern", thrift.STRUCT, 8); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:hash_key_filter_pattern: ", p), err) - } - if err := p.HashKeyFilterPattern.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.HashKeyFilterPattern), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 8:hash_key_filter_pattern: ", p), err) - } - return err -} - -func (p *GetScannerRequest) writeField9(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("sort_key_filter_type", thrift.I32, 9); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 9:sort_key_filter_type: ", p), err) - } - if err := oprot.WriteI32(int32(p.SortKeyFilterType)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.sort_key_filter_type (9) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 9:sort_key_filter_type: ", p), err) - } - return err -} - -func (p *GetScannerRequest) writeField10(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("sort_key_filter_pattern", thrift.STRUCT, 10); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 10:sort_key_filter_pattern: ", p), err) - } - if err := p.SortKeyFilterPattern.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.SortKeyFilterPattern), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 10:sort_key_filter_pattern: ", p), err) - } - return err -} - -func (p *GetScannerRequest) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("GetScannerRequest(%+v)", *p) -} - -// Attributes: -// - ContextID -type ScanRequest struct { - ContextID int64 `thrift:"context_id,1" db:"context_id" json:"context_id"` -} - -func NewScanRequest() *ScanRequest { - return &ScanRequest{} -} - -func (p *ScanRequest) GetContextID() int64 { - return p.ContextID -} -func (p *ScanRequest) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.I64 { - if err := 
p.ReadField1(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *ScanRequest) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.ContextID = v - } - return nil -} - -func (p *ScanRequest) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("scan_request"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *ScanRequest) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("context_id", thrift.I64, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:context_id: ", p), err) - } - if err := oprot.WriteI64(int64(p.ContextID)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.context_id (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:context_id: ", p), err) - } - return err -} - -func (p *ScanRequest) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("ScanRequest(%+v)", *p) -} - -// Attributes: -// - Error -// - Kvs -// - ContextID -// - AppID -// - PartitionIndex -// - Server -type ScanResponse struct { - Error int32 `thrift:"error,1" db:"error" json:"error"` - Kvs []*KeyValue `thrift:"kvs,2" db:"kvs" json:"kvs"` - ContextID int64 `thrift:"context_id,3" db:"context_id" json:"context_id"` - AppID int32 `thrift:"app_id,4" db:"app_id" json:"app_id"` - PartitionIndex int32 `thrift:"partition_index,5" db:"partition_index" json:"partition_index"` - Server string `thrift:"server,6" db:"server" json:"server"` -} - -func NewScanResponse() *ScanResponse { - return &ScanResponse{} -} - -func (p *ScanResponse) GetError() int32 { - return p.Error -} - -func (p *ScanResponse) GetKvs() []*KeyValue { - return p.Kvs -} - -func (p *ScanResponse) GetContextID() int64 { - return p.ContextID -} - -func (p *ScanResponse) GetAppID() int32 { - return p.AppID -} - -func (p *ScanResponse) GetPartitionIndex() int32 { - return p.PartitionIndex -} - -func (p *ScanResponse) GetServer() string { - return p.Server -} -func (p *ScanResponse) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.I32 { - if err := p.ReadField1(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 2: - if 
fieldTypeId == thrift.LIST { - if err := p.ReadField2(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 3: - if fieldTypeId == thrift.I64 { - if err := p.ReadField3(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 4: - if fieldTypeId == thrift.I32 { - if err := p.ReadField4(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 5: - if fieldTypeId == thrift.I32 { - if err := p.ReadField5(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 6: - if fieldTypeId == thrift.STRING { - if err := p.ReadField6(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *ScanResponse) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.Error = v - } - return nil -} - -func (p *ScanResponse) ReadField2(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*KeyValue, 0, size) - p.Kvs = tSlice - for i := 0; i < size; i++ { - _elem4 := &KeyValue{} - if err := _elem4.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem4), err) - } - p.Kvs = append(p.Kvs, _elem4) - } - if err := iprot.ReadListEnd(); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *ScanResponse) ReadField3(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return thrift.PrependError("error reading field 3: ", err) - } else { - p.ContextID = v - } - return nil -} - -func (p *ScanResponse) ReadField4(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 4: ", err) - } else { - p.AppID = v - } - return nil -} - -func (p *ScanResponse) ReadField5(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 5: ", err) - } else { - p.PartitionIndex = v - } - return nil -} - -func (p *ScanResponse) ReadField6(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 6: ", err) - } else { - p.Server = v - } - return nil -} - -func (p *ScanResponse) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("scan_response"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - if err := p.writeField3(oprot); err != nil { - return err - } - if err := p.writeField4(oprot); err != nil { - return err - } - if err := p.writeField5(oprot); err != nil { - return err - } - if err := p.writeField6(oprot); err != nil { - return 
[The diff continues deleting the auto-generated Apache Thrift bindings for the Pegasus `rrdb` service. Removed in this portion: the remaining ScanResponse writeField helpers (error, kvs, context_id, app_id, partition_index, server) and its String method; the Rrdb service interface; the RrdbClient constructors and its one method per RPC (put, multi_put, remove, multi_remove, incr, check_and_set, get, multi_get, sortkey_count, ttl, get_scanner, scan, clear_scanner); the RrdbProcessor with its per-RPC processor types; and the generated *Args/*Result structs (RrdbPutArgs through RrdbGetResult) with their Read/Write/String helpers.]
nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) - } - } - return err -} - -func (p *RrdbGetResult) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("RrdbGetResult(%+v)", *p) -} - -// Attributes: -// - Request -type RrdbMultiGetArgs struct { - Request *MultiGetRequest `thrift:"request,1" db:"request" json:"request"` -} - -func NewRrdbMultiGetArgs() *RrdbMultiGetArgs { - return &RrdbMultiGetArgs{} -} - -var RrdbMultiGetArgs_Request_DEFAULT *MultiGetRequest - -func (p *RrdbMultiGetArgs) GetRequest() *MultiGetRequest { - if !p.IsSetRequest() { - return RrdbMultiGetArgs_Request_DEFAULT - } - return p.Request -} -func (p *RrdbMultiGetArgs) IsSetRequest() bool { - return p.Request != nil -} - -func (p *RrdbMultiGetArgs) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField1(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *RrdbMultiGetArgs) ReadField1(iprot thrift.TProtocol) error { - p.Request = &MultiGetRequest{} - if err := p.Request.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Request), err) - } - return nil -} - -func (p *RrdbMultiGetArgs) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("multi_get_args"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *RrdbMultiGetArgs) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("request", thrift.STRUCT, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:request: ", p), err) - } - if err := p.Request.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Request), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:request: ", p), err) - } - return err -} - -func (p *RrdbMultiGetArgs) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("RrdbMultiGetArgs(%+v)", *p) -} - -// Attributes: -// - Success -type RrdbMultiGetResult struct { - Success *MultiGetResponse `thrift:"success,0" db:"success" json:"success,omitempty"` -} - -func NewRrdbMultiGetResult() *RrdbMultiGetResult { - return &RrdbMultiGetResult{} -} - -var RrdbMultiGetResult_Success_DEFAULT *MultiGetResponse - -func (p *RrdbMultiGetResult) GetSuccess() *MultiGetResponse { - 
if !p.IsSetSuccess() { - return RrdbMultiGetResult_Success_DEFAULT - } - return p.Success -} -func (p *RrdbMultiGetResult) IsSetSuccess() bool { - return p.Success != nil -} - -func (p *RrdbMultiGetResult) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 0: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField0(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *RrdbMultiGetResult) ReadField0(iprot thrift.TProtocol) error { - p.Success = &MultiGetResponse{} - if err := p.Success.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) - } - return nil -} - -func (p *RrdbMultiGetResult) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("multi_get_result"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField0(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *RrdbMultiGetResult) writeField0(oprot thrift.TProtocol) (err error) { - if p.IsSetSuccess() { - if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) - } - if err := p.Success.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) - } - } - return err -} - -func (p *RrdbMultiGetResult) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("RrdbMultiGetResult(%+v)", *p) -} - -// Attributes: -// - HashKey -type RrdbSortkeyCountArgs struct { - HashKey *base.Blob `thrift:"hash_key,1" db:"hash_key" json:"hash_key"` -} - -func NewRrdbSortkeyCountArgs() *RrdbSortkeyCountArgs { - return &RrdbSortkeyCountArgs{} -} - -var RrdbSortkeyCountArgs_HashKey_DEFAULT *base.Blob - -func (p *RrdbSortkeyCountArgs) GetHashKey() *base.Blob { - if !p.IsSetHashKey() { - return RrdbSortkeyCountArgs_HashKey_DEFAULT - } - return p.HashKey -} -func (p *RrdbSortkeyCountArgs) IsSetHashKey() bool { - return p.HashKey != nil -} - -func (p *RrdbSortkeyCountArgs) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if 
fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField1(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *RrdbSortkeyCountArgs) ReadField1(iprot thrift.TProtocol) error { - p.HashKey = &base.Blob{} - if err := p.HashKey.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.HashKey), err) - } - return nil -} - -func (p *RrdbSortkeyCountArgs) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("sortkey_count_args"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *RrdbSortkeyCountArgs) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("hash_key", thrift.STRUCT, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:hash_key: ", p), err) - } - if err := p.HashKey.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.HashKey), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:hash_key: ", p), err) - } - return err -} - -func (p *RrdbSortkeyCountArgs) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("RrdbSortkeyCountArgs(%+v)", *p) -} - -// Attributes: -// - Success -type RrdbSortkeyCountResult struct { - Success *CountResponse `thrift:"success,0" db:"success" json:"success,omitempty"` -} - -func NewRrdbSortkeyCountResult() *RrdbSortkeyCountResult { - return &RrdbSortkeyCountResult{} -} - -var RrdbSortkeyCountResult_Success_DEFAULT *CountResponse - -func (p *RrdbSortkeyCountResult) GetSuccess() *CountResponse { - if !p.IsSetSuccess() { - return RrdbSortkeyCountResult_Success_DEFAULT - } - return p.Success -} -func (p *RrdbSortkeyCountResult) IsSetSuccess() bool { - return p.Success != nil -} - -func (p *RrdbSortkeyCountResult) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 0: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField0(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - 
return nil -} - -func (p *RrdbSortkeyCountResult) ReadField0(iprot thrift.TProtocol) error { - p.Success = &CountResponse{} - if err := p.Success.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) - } - return nil -} - -func (p *RrdbSortkeyCountResult) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("sortkey_count_result"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField0(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *RrdbSortkeyCountResult) writeField0(oprot thrift.TProtocol) (err error) { - if p.IsSetSuccess() { - if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) - } - if err := p.Success.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) - } - } - return err -} - -func (p *RrdbSortkeyCountResult) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("RrdbSortkeyCountResult(%+v)", *p) -} - -// Attributes: -// - Key -type RrdbTTLArgs struct { - Key *base.Blob `thrift:"key,1" db:"key" json:"key"` -} - -func NewRrdbTTLArgs() *RrdbTTLArgs { - return &RrdbTTLArgs{} -} - -var RrdbTTLArgs_Key_DEFAULT *base.Blob - -func (p *RrdbTTLArgs) GetKey() *base.Blob { - if !p.IsSetKey() { - return RrdbTTLArgs_Key_DEFAULT - } - return p.Key -} -func (p *RrdbTTLArgs) IsSetKey() bool { - return p.Key != nil -} - -func (p *RrdbTTLArgs) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField1(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *RrdbTTLArgs) ReadField1(iprot thrift.TProtocol) error { - p.Key = &base.Blob{} - if err := p.Key.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Key), err) - } - return nil -} - -func (p *RrdbTTLArgs) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("ttl_args"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if 
err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *RrdbTTLArgs) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("key", thrift.STRUCT, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:key: ", p), err) - } - if err := p.Key.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Key), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:key: ", p), err) - } - return err -} - -func (p *RrdbTTLArgs) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("RrdbTTLArgs(%+v)", *p) -} - -// Attributes: -// - Success -type RrdbTTLResult struct { - Success *TTLResponse `thrift:"success,0" db:"success" json:"success,omitempty"` -} - -func NewRrdbTTLResult() *RrdbTTLResult { - return &RrdbTTLResult{} -} - -var RrdbTTLResult_Success_DEFAULT *TTLResponse - -func (p *RrdbTTLResult) GetSuccess() *TTLResponse { - if !p.IsSetSuccess() { - return RrdbTTLResult_Success_DEFAULT - } - return p.Success -} -func (p *RrdbTTLResult) IsSetSuccess() bool { - return p.Success != nil -} - -func (p *RrdbTTLResult) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 0: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField0(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *RrdbTTLResult) ReadField0(iprot thrift.TProtocol) error { - p.Success = &TTLResponse{} - if err := p.Success.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) - } - return nil -} - -func (p *RrdbTTLResult) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("ttl_result"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField0(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *RrdbTTLResult) writeField0(oprot thrift.TProtocol) (err error) { - if p.IsSetSuccess() { - if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) - } - if err := p.Success.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) - } - } - return 
err -} - -func (p *RrdbTTLResult) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("RrdbTTLResult(%+v)", *p) -} - -// Attributes: -// - Request -type RrdbGetScannerArgs struct { - Request *GetScannerRequest `thrift:"request,1" db:"request" json:"request"` -} - -func NewRrdbGetScannerArgs() *RrdbGetScannerArgs { - return &RrdbGetScannerArgs{} -} - -var RrdbGetScannerArgs_Request_DEFAULT *GetScannerRequest - -func (p *RrdbGetScannerArgs) GetRequest() *GetScannerRequest { - if !p.IsSetRequest() { - return RrdbGetScannerArgs_Request_DEFAULT - } - return p.Request -} -func (p *RrdbGetScannerArgs) IsSetRequest() bool { - return p.Request != nil -} - -func (p *RrdbGetScannerArgs) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField1(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *RrdbGetScannerArgs) ReadField1(iprot thrift.TProtocol) error { - p.Request = &GetScannerRequest{} - if err := p.Request.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Request), err) - } - return nil -} - -func (p *RrdbGetScannerArgs) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("get_scanner_args"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *RrdbGetScannerArgs) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("request", thrift.STRUCT, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:request: ", p), err) - } - if err := p.Request.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Request), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:request: ", p), err) - } - return err -} - -func (p *RrdbGetScannerArgs) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("RrdbGetScannerArgs(%+v)", *p) -} - -// Attributes: -// - Success -type RrdbGetScannerResult struct { - Success *ScanResponse `thrift:"success,0" db:"success" json:"success,omitempty"` -} - -func NewRrdbGetScannerResult() *RrdbGetScannerResult { - return &RrdbGetScannerResult{} -} - -var RrdbGetScannerResult_Success_DEFAULT *ScanResponse - -func (p *RrdbGetScannerResult) GetSuccess() *ScanResponse { - if !p.IsSetSuccess() { - return RrdbGetScannerResult_Success_DEFAULT - } - 
return p.Success -} -func (p *RrdbGetScannerResult) IsSetSuccess() bool { - return p.Success != nil -} - -func (p *RrdbGetScannerResult) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 0: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField0(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *RrdbGetScannerResult) ReadField0(iprot thrift.TProtocol) error { - p.Success = &ScanResponse{} - if err := p.Success.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) - } - return nil -} - -func (p *RrdbGetScannerResult) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("get_scanner_result"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField0(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *RrdbGetScannerResult) writeField0(oprot thrift.TProtocol) (err error) { - if p.IsSetSuccess() { - if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) - } - if err := p.Success.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) - } - } - return err -} - -func (p *RrdbGetScannerResult) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("RrdbGetScannerResult(%+v)", *p) -} - -// Attributes: -// - Request -type RrdbScanArgs struct { - Request *ScanRequest `thrift:"request,1" db:"request" json:"request"` -} - -func NewRrdbScanArgs() *RrdbScanArgs { - return &RrdbScanArgs{} -} - -var RrdbScanArgs_Request_DEFAULT *ScanRequest - -func (p *RrdbScanArgs) GetRequest() *ScanRequest { - if !p.IsSetRequest() { - return RrdbScanArgs_Request_DEFAULT - } - return p.Request -} -func (p *RrdbScanArgs) IsSetRequest() bool { - return p.Request != nil -} - -func (p *RrdbScanArgs) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.STRUCT { - if err := 
p.ReadField1(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *RrdbScanArgs) ReadField1(iprot thrift.TProtocol) error { - p.Request = &ScanRequest{} - if err := p.Request.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Request), err) - } - return nil -} - -func (p *RrdbScanArgs) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("scan_args"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *RrdbScanArgs) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("request", thrift.STRUCT, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:request: ", p), err) - } - if err := p.Request.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Request), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:request: ", p), err) - } - return err -} - -func (p *RrdbScanArgs) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("RrdbScanArgs(%+v)", *p) -} - -// Attributes: -// - Success -type RrdbScanResult struct { - Success *ScanResponse `thrift:"success,0" db:"success" json:"success,omitempty"` -} - -func NewRrdbScanResult() *RrdbScanResult { - return &RrdbScanResult{} -} - -var RrdbScanResult_Success_DEFAULT *ScanResponse - -func (p *RrdbScanResult) GetSuccess() *ScanResponse { - if !p.IsSetSuccess() { - return RrdbScanResult_Success_DEFAULT - } - return p.Success -} -func (p *RrdbScanResult) IsSetSuccess() bool { - return p.Success != nil -} - -func (p *RrdbScanResult) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 0: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField0(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *RrdbScanResult) ReadField0(iprot thrift.TProtocol) error { - p.Success = &ScanResponse{} - if err := p.Success.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), 
err) - } - return nil -} - -func (p *RrdbScanResult) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("scan_result"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField0(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *RrdbScanResult) writeField0(oprot thrift.TProtocol) (err error) { - if p.IsSetSuccess() { - if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) - } - if err := p.Success.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) - } - } - return err -} - -func (p *RrdbScanResult) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("RrdbScanResult(%+v)", *p) -} - -// Attributes: -// - ContextID -type RrdbClearScannerArgs struct { - ContextID int64 `thrift:"context_id,1" db:"context_id" json:"context_id"` -} - -func NewRrdbClearScannerArgs() *RrdbClearScannerArgs { - return &RrdbClearScannerArgs{} -} - -func (p *RrdbClearScannerArgs) GetContextID() int64 { - return p.ContextID -} -func (p *RrdbClearScannerArgs) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.I64 { - if err := p.ReadField1(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *RrdbClearScannerArgs) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.ContextID = v - } - return nil -} - -func (p *RrdbClearScannerArgs) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("clear_scanner_args"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *RrdbClearScannerArgs) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("context_id", thrift.I64, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 
1:context_id: ", p), err) - } - if err := oprot.WriteI64(int64(p.ContextID)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.context_id (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:context_id: ", p), err) - } - return err -} - -func (p *RrdbClearScannerArgs) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("RrdbClearScannerArgs(%+v)", *p) -} - -type Meta interface { - // Parameters: - // - Query - QueryCfg(ctx context.Context, query *replication.QueryCfgRequest) (r *replication.QueryCfgResponse, err error) -} - -type MetaClient struct { - c thrift.TClient -} - -func NewMetaClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *MetaClient { - return &MetaClient{ - c: thrift.NewTStandardClient(f.GetProtocol(t), f.GetProtocol(t)), - } -} - -func NewMetaClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *MetaClient { - return &MetaClient{ - c: thrift.NewTStandardClient(iprot, oprot), - } -} - -func NewMetaClient(c thrift.TClient) *MetaClient { - return &MetaClient{ - c: c, - } -} - -func (p *MetaClient) Client_() thrift.TClient { - return p.c -} - -// Parameters: -// - Query -func (p *MetaClient) QueryCfg(ctx context.Context, query *replication.QueryCfgRequest) (r *replication.QueryCfgResponse, err error) { - var _args105 MetaQueryCfgArgs - _args105.Query = query - var _result106 MetaQueryCfgResult - if err = p.Client_().Call(ctx, "query_cfg", &_args105, &_result106); err != nil { - return - } - return _result106.GetSuccess(), nil -} - -type MetaProcessor struct { - processorMap map[string]thrift.TProcessorFunction - handler Meta -} - -func (p *MetaProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) { - p.processorMap[key] = processor -} - -func (p *MetaProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) { - processor, ok = p.processorMap[key] - return processor, ok -} - -func (p *MetaProcessor) ProcessorMap() map[string]thrift.TProcessorFunction { - return p.processorMap -} - -func NewMetaProcessor(handler Meta) *MetaProcessor { - - self107 := &MetaProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)} - self107.processorMap["query_cfg"] = &metaProcessorQueryCfg{handler: handler} - return self107 -} - -func (p *MetaProcessor) Process(ctx context.Context, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - name, _, seqId, err := iprot.ReadMessageBegin() - if err != nil { - return false, err - } - if processor, ok := p.GetProcessorFunction(name); ok { - return processor.Process(ctx, seqId, iprot, oprot) - } - iprot.Skip(thrift.STRUCT) - iprot.ReadMessageEnd() - x108 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name) - oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId) - x108.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return false, x108 - -} - -type metaProcessorQueryCfg struct { - handler Meta -} - -func (p *metaProcessorQueryCfg) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := MetaQueryCfgArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("query_cfg", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return 
false, err - } - - iprot.ReadMessageEnd() - result := MetaQueryCfgResult{} - var retval *replication.QueryCfgResponse - var err2 error - if retval, err2 = p.handler.QueryCfg(ctx, args.Query); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing query_cfg: "+err2.Error()) - oprot.WriteMessageBegin("query_cfg", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return true, err2 - } else { - result.Success = retval - } - if err2 = oprot.WriteMessageBegin("query_cfg", thrift.REPLY, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = err2 - } - if err != nil { - return - } - return true, err -} - -// HELPER FUNCTIONS AND STRUCTURES - -// Attributes: -// - Query -type MetaQueryCfgArgs struct { - Query *replication.QueryCfgRequest `thrift:"query,1" db:"query" json:"query"` -} - -func NewMetaQueryCfgArgs() *MetaQueryCfgArgs { - return &MetaQueryCfgArgs{} -} - -var MetaQueryCfgArgs_Query_DEFAULT *replication.QueryCfgRequest - -func (p *MetaQueryCfgArgs) GetQuery() *replication.QueryCfgRequest { - if !p.IsSetQuery() { - return MetaQueryCfgArgs_Query_DEFAULT - } - return p.Query -} -func (p *MetaQueryCfgArgs) IsSetQuery() bool { - return p.Query != nil -} - -func (p *MetaQueryCfgArgs) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField1(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *MetaQueryCfgArgs) ReadField1(iprot thrift.TProtocol) error { - p.Query = &replication.QueryCfgRequest{} - if err := p.Query.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Query), err) - } - return nil -} - -func (p *MetaQueryCfgArgs) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("query_cfg_args"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *MetaQueryCfgArgs) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("query", thrift.STRUCT, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:query: ", p), err) - } - if err := p.Query.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error 
writing struct: ", p.Query), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:query: ", p), err) - } - return err -} - -func (p *MetaQueryCfgArgs) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("MetaQueryCfgArgs(%+v)", *p) -} - -// Attributes: -// - Success -type MetaQueryCfgResult struct { - Success *replication.QueryCfgResponse `thrift:"success,0" db:"success" json:"success,omitempty"` -} - -func NewMetaQueryCfgResult() *MetaQueryCfgResult { - return &MetaQueryCfgResult{} -} - -var MetaQueryCfgResult_Success_DEFAULT *replication.QueryCfgResponse - -func (p *MetaQueryCfgResult) GetSuccess() *replication.QueryCfgResponse { - if !p.IsSetSuccess() { - return MetaQueryCfgResult_Success_DEFAULT - } - return p.Success -} -func (p *MetaQueryCfgResult) IsSetSuccess() bool { - return p.Success != nil -} - -func (p *MetaQueryCfgResult) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 0: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField0(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *MetaQueryCfgResult) ReadField0(iprot thrift.TProtocol) error { - p.Success = &replication.QueryCfgResponse{} - if err := p.Success.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) - } - return nil -} - -func (p *MetaQueryCfgResult) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("query_cfg_result"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField0(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *MetaQueryCfgResult) writeField0(oprot thrift.TProtocol) (err error) { - if p.IsSetSuccess() { - if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) - } - if err := p.Success.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) - } - } - return err -} - -func (p *MetaQueryCfgResult) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("MetaQueryCfgResult(%+v)", *p) -} diff --git a/vendor/github.com/XiaoMi/pegasus-go-client/pegalog/logger.go b/vendor/github.com/XiaoMi/pegasus-go-client/pegalog/logger.go deleted file mode 100644 index 
fa4fec0..0000000 --- a/vendor/github.com/XiaoMi/pegasus-go-client/pegalog/logger.go +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright (c) 2017, Xiaomi, Inc. -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package pegalog - -import ( - "log" - "os" - "sync" -) - -// The logger module in this file is inspired by etcd/clientv3/logger - -// Logger is the internal logger served for pegasus go client. -// WARN: Don't use this logger for your application. -type Logger interface { - Fatal(args ...interface{}) - Fatalf(format string, args ...interface{}) - Print(args ...interface{}) - Printf(format string, args ...interface{}) -} - -var ( - _logger settableLogger -) - -type settableLogger struct { - l Logger - mu sync.RWMutex -} - -// StderrLogger is an implementation of Logger that outputs logs to stderr. -// WARN: Don't use it in your production environment. Lack of logs after failures will make it -// significantly difficult to track the root cause. -var StderrLogger = log.New(os.Stderr, "", log.LstdFlags) - -func init() { - // by default we use stderr for logging - _logger.set(DefaultLogrusLogger) -} - -// SetLogger sets client-side Logger. By default, logs are disabled. -func SetLogger(l Logger) { - _logger.set(l) -} - -// GetLogger returns the current logger. -func GetLogger() Logger { - return _logger.get() -} - -func (s *settableLogger) set(l Logger) { - s.mu.Lock() - _logger.l = l - s.mu.Unlock() -} - -func (s *settableLogger) get() Logger { - s.mu.RLock() - l := _logger.l - s.mu.RUnlock() - return l -} - -func (s *settableLogger) Fatal(args ...interface{}) { s.get().Fatal(args...) } -func (s *settableLogger) Fatalf(format string, args ...interface{}) { s.get().Fatalf(format, args...) } -func (s *settableLogger) Print(args ...interface{}) { s.get().Print(args...) } -func (s *settableLogger) Printf(format string, args ...interface{}) { s.get().Printf(format, args...) } diff --git a/vendor/github.com/XiaoMi/pegasus-go-client/pegalog/logrus_logger.go b/vendor/github.com/XiaoMi/pegasus-go-client/pegalog/logrus_logger.go deleted file mode 100644 index 58a8ec7..0000000 --- a/vendor/github.com/XiaoMi/pegasus-go-client/pegalog/logrus_logger.go +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright (c) 2017, Xiaomi, Inc. -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package pegalog - -import ( - "fmt" - "runtime" - "strings" - - "github.com/sirupsen/logrus" - "gopkg.in/natefinch/lumberjack.v2" -) - -// LogrusConfig is used to configure the generation of log files. -type LogrusConfig struct { - Filename string - MaxSize int - MaxAge int - MaxBackups int -} - -// callerPrettifier simplifies the caller info -func callerPrettifier(f *runtime.Frame) (function string, file string) { - function = f.Function[strings.LastIndex(f.Function, "/")+1:] - file = fmt.Sprint(f.File[strings.LastIndex(f.File, "/")+1:], ":", f.Line) - return function, file -} - -// NewLogrusLogger creates a new LogrusLogger. -func NewLogrusLogger(cfg *LogrusConfig) Logger { - l := logrus.New() - l.Formatter = &logrus.TextFormatter{ - DisableColors: true, - FullTimestamp: true, - CallerPrettyfier: callerPrettifier, - } - l.Out = &lumberjack.Logger{ - Filename: cfg.Filename, - MaxSize: cfg.MaxSize, - MaxAge: cfg.MaxAge, - LocalTime: true, - } - l.ReportCaller = true - return l -} - -// DefaultLogrusLogger is a LogrusLogger instance with default configurations. -var DefaultLogrusLogger = NewLogrusLogger(&LogrusConfig{ - MaxSize: 500, // megabytes - MaxAge: 5, // days - Filename: "./pegasus.log", -}) diff --git a/vendor/github.com/XiaoMi/pegasus-go-client/pegasus/check_and_set.go b/vendor/github.com/XiaoMi/pegasus-go-client/pegasus/check_and_set.go deleted file mode 100644 index 01ab373..0000000 --- a/vendor/github.com/XiaoMi/pegasus-go-client/pegasus/check_and_set.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright (c) 2017, Xiaomi, Inc. All rights reserved. -// This source code is licensed under the Apache License Version 2.0, which -// can be found in the LICENSE file in the root directory of this source tree. - -package pegasus - -import "github.com/XiaoMi/pegasus-go-client/idl/rrdb" - -// CheckType defines the types of value checking in a CAS. 
-type CheckType int - -// The value checking types -const ( - CheckTypeNoCheck = CheckType(rrdb.CasCheckType_CT_NO_CHECK) - - // existence - CheckTypeValueNotExist = CheckType(rrdb.CasCheckType_CT_VALUE_NOT_EXIST) // value is not exist - CheckTypeValueNotExistOrEmpty = CheckType(rrdb.CasCheckType_CT_VALUE_NOT_EXIST_OR_EMPTY) // value is not exist or value is empty - CheckTypeValueExist = CheckType(rrdb.CasCheckType_CT_VALUE_EXIST) // value is exist - CheckTypeValueNotEmpty = CheckType(rrdb.CasCheckType_CT_VALUE_NOT_EMPTY) // value is exist and not empty - - // match - CheckTypeMatchAnywhere = CheckType(rrdb.CasCheckType_CT_VALUE_MATCH_ANYWHERE) // operand matches anywhere in value - CheckTypeMatchPrefix = CheckType(rrdb.CasCheckType_CT_VALUE_MATCH_PREFIX) // operand matches prefix in value - CheckTypeMatchPostfix = CheckType(rrdb.CasCheckType_CT_VALUE_MATCH_POSTFIX) // operand matches postfix in value - - // bytes compare - CheckTypeBytesLess = CheckType(rrdb.CasCheckType_CT_VALUE_BYTES_LESS) // bytes compare: value < operand - CheckTypeBytesLessOrEqual = CheckType(rrdb.CasCheckType_CT_VALUE_BYTES_LESS_OR_EQUAL) // bytes compare: value <= operand - CheckTypeBytesEqual = CheckType(rrdb.CasCheckType_CT_VALUE_BYTES_EQUAL) // bytes compare: value == operand - CheckTypeBytesGreaterOrEqual = CheckType(rrdb.CasCheckType_CT_VALUE_BYTES_GREATER_OR_EQUAL) // bytes compare: value >= operand - CheckTypeBytesGreater = CheckType(rrdb.CasCheckType_CT_VALUE_BYTES_GREATER) // bytes compare: value > operand - - // int compare: first transfer bytes to int64; then compare by int value - CheckTypeIntLess = CheckType(rrdb.CasCheckType_CT_VALUE_INT_LESS) // int compare: value < operand - CheckTypeIntLessOrEqual = CheckType(rrdb.CasCheckType_CT_VALUE_INT_LESS_OR_EQUAL) // int compare: value <= operand - CheckTypeIntEqual = CheckType(rrdb.CasCheckType_CT_VALUE_INT_EQUAL) // int compare: value == operand - CheckTypeIntGreaterOrEqual = CheckType(rrdb.CasCheckType_CT_VALUE_INT_GREATER_OR_EQUAL) // int compare: value >= operand - CheckTypeIntGreater = CheckType(rrdb.CasCheckType_CT_VALUE_BYTES_GREATER) // int compare: value > operand -) - -// CheckAndSetResult is the result of a CAS. -type CheckAndSetResult struct { - // true if set value succeed. - SetSucceed bool - - // the actual value if set value failed; null means the actual value is not exist. - CheckValue []byte - - // if the check value is exist; can be used only when checkValueReturned is true. - CheckValueExist bool - - // return the check value if exist; can be used only when checkValueExist is true. - CheckValueReturned bool -} - -// CheckAndSetOptions is the options of a CAS. -type CheckAndSetOptions struct { - SetValueTTLSeconds int // time to live in seconds of the set value, 0 means no ttl. - ReturnCheckValue bool // if return the check value in results. -} diff --git a/vendor/github.com/XiaoMi/pegasus-go-client/pegasus/client.go b/vendor/github.com/XiaoMi/pegasus-go-client/pegasus/client.go deleted file mode 100644 index aa6cc1a..0000000 --- a/vendor/github.com/XiaoMi/pegasus-go-client/pegasus/client.go +++ /dev/null @@ -1,105 +0,0 @@ -// Copyright (c) 2017, Xiaomi, Inc. All rights reserved. -// This source code is licensed under the Apache License Version 2.0, which -// can be found in the LICENSE file in the root directory of this source tree. 
- -package pegasus - -import ( - "context" - "sync" - - "github.com/XiaoMi/pegasus-go-client/pegalog" - "github.com/XiaoMi/pegasus-go-client/session" -) - -// Client manages the client sessions to the pegasus cluster specified by `Config`. -// In order to reuse the previous connections, it's recommended to use one singleton -// client in your program. The operations upon a client instance are thread-safe. -type Client interface { - Close() error - - // Open the specific pegasus table. If the table was opened before, - // it will reuse the previous connection to the table. - OpenTable(ctx context.Context, tableName string) (TableConnector, error) -} - -type pegasusClient struct { - tables map[string]TableConnector - - // protect the access of tables - mu sync.RWMutex - - metaMgr *session.MetaManager - replicaMgr *session.ReplicaManager -} - -// NewClient creates a new instance of pegasus client. -// It panics if the configured addresses are illegal. -func NewClient(cfg Config) Client { - c, err := newClientWithError(cfg) - if err != nil { - pegalog.GetLogger().Fatal(err) - return nil - } - return c -} - -func newClientWithError(cfg Config) (Client, error) { - var err error - cfg.MetaServers, err = session.ResolveMetaAddr(cfg.MetaServers) - if err != nil { - return nil, err - } - - c := &pegasusClient{ - tables: make(map[string]TableConnector), - metaMgr: session.NewMetaManager(cfg.MetaServers, session.NewNodeSession), - replicaMgr: session.NewReplicaManager(session.NewNodeSession), - } - return c, nil -} - -func (p *pegasusClient) Close() error { - p.mu.RLock() - defer p.mu.RUnlock() - - for _, table := range p.tables { - if err := table.Close(); err != nil { - return err - } - } - - if err := p.metaMgr.Close(); err != nil { - pegalog.GetLogger().Fatal("pegasus-go-client: unable to close MetaManager: ", err) - } - return p.replicaMgr.Close() -} - -func (p *pegasusClient) OpenTable(ctx context.Context, tableName string) (TableConnector, error) { - tb, err := func() (TableConnector, error) { - // ensure only one goroutine is fetching the routing table. - p.mu.Lock() - defer p.mu.Unlock() - - if tb := p.findTable(tableName); tb != nil { - return tb, nil - } - - var tb TableConnector - tb, err := ConnectTable(ctx, tableName, p.metaMgr, p.replicaMgr) - if err != nil { - return nil, err - } - p.tables[tableName] = tb - - return tb, nil - }() - return tb, WrapError(err, OpQueryConfig) -} - -func (p *pegasusClient) findTable(tableName string) TableConnector { - if tb, ok := p.tables[tableName]; ok { - return tb - } - return nil -} diff --git a/vendor/github.com/XiaoMi/pegasus-go-client/pegasus/config.go b/vendor/github.com/XiaoMi/pegasus-go-client/pegasus/config.go deleted file mode 100644 index d338a64..0000000 --- a/vendor/github.com/XiaoMi/pegasus-go-client/pegasus/config.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright (c) 2017, Xiaomi, Inc. All rights reserved. -// This source code is licensed under the Apache License Version 2.0, which -// can be found in the LICENSE file in the root directory of this source tree. - -package pegasus - -// Config is the configuration of pegasus client. -type Config struct { - MetaServers []string `json:"meta_servers"` -} diff --git a/vendor/github.com/XiaoMi/pegasus-go-client/pegasus/error.go b/vendor/github.com/XiaoMi/pegasus-go-client/pegasus/error.go deleted file mode 100644 index a1c6edb..0000000 --- a/vendor/github.com/XiaoMi/pegasus-go-client/pegasus/error.go +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright (c) 2017, Xiaomi, Inc. All rights reserved. 
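Since the vendored copy of the Pegasus client is being dropped here, a minimal usage sketch of the API defined in the deleted `client.go`/`config.go` may help reviewers confirm the public surface stays the same when the dependency is pulled via Go modules instead. The meta-server addresses and table name below are placeholders, and a reachable Pegasus cluster is assumed.

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/XiaoMi/pegasus-go-client/pegasus"
)

func main() {
	// NewClient logs fatally (and returns nil) if the meta-server addresses
	// cannot be resolved (see newClientWithError in the deleted client.go).
	client := pegasus.NewClient(pegasus.Config{
		MetaServers: []string{"127.0.0.1:34601", "127.0.0.1:34602"}, // placeholder addresses
	})
	defer client.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// OpenTable caches the connection, so repeated calls reuse the same TableConnector.
	table, err := client.OpenTable(ctx, "example_table")
	if err != nil {
		panic(err)
	}
	defer table.Close()

	// Basic single-entry operations exposed by TableConnector (see table_connector.go below).
	if err := table.Set(ctx, []byte("hash"), []byte("sort"), []byte("value")); err != nil {
		panic(err)
	}
	value, err := table.Get(ctx, []byte("hash"), []byte("sort"))
	if err != nil {
		panic(err)
	}
	fmt.Printf("got %q\n", value)
}
```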
-// This source code is licensed under the Apache License Version 2.0, which -// can be found in the LICENSE file in the root directory of this source tree. - -package pegasus - -import ( - "fmt" -) - -// PError is the return error type of all interfaces of pegasus client. -type PError struct { - // Err is the error that occurred during the operation. - Err error - - // The failed operation - Op OpType -} - -// OpType is the type of operation that led to PError. -type OpType int - -// Operation types -const ( - OpQueryConfig OpType = iota - OpGet - OpSet - OpDel - OpMultiDel - OpMultiGet - OpMultiGetRange - OpClose - OpMultiSet - OpTTL - OpExist - OpGetScanner - OpGetUnorderedScanners - OpNext - OpScannerClose - OpCheckAndSet - OpSortKeyCount - OpIncr - OpBatchGet -) - -var opTypeToStringMap = map[OpType]string{ - OpQueryConfig: "table configuration query", - OpGet: "GET", - OpSet: "SET", - OpDel: "DEL", - OpMultiGet: "MULTI_GET", - OpMultiGetRange: "MULTI_GET_RANGE", - OpMultiDel: "MULTI_DEL", - OpClose: "Close", - OpMultiSet: "MULTI_SET", - OpTTL: "TTL", - OpExist: "EXIST", - OpGetScanner: "GET_SCANNER", - OpGetUnorderedScanners: "GET_UNORDERED_SCANNERS", - OpNext: "SCAN_NEXT", - OpScannerClose: "SCANNER_CLOSE", - OpCheckAndSet: "CHECK_AND_SET", - OpSortKeyCount: "SORTKEY_COUNT", - OpIncr: "INCR", - OpBatchGet: "BATCH_GET", -} - -func (op OpType) String() string { - return opTypeToStringMap[op] -} - -func (e *PError) Error() string { - return fmt.Sprintf("pegasus %s failed: %s", e.Op, e.Err.Error()) -} diff --git a/vendor/github.com/XiaoMi/pegasus-go-client/pegasus/filter.go b/vendor/github.com/XiaoMi/pegasus-go-client/pegasus/filter.go deleted file mode 100644 index c85965f..0000000 --- a/vendor/github.com/XiaoMi/pegasus-go-client/pegasus/filter.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright (c) 2017, Xiaomi, Inc. All rights reserved. -// This source code is licensed under the Apache License Version 2.0, which -// can be found in the LICENSE file in the root directory of this source tree. - -package pegasus - -import "github.com/XiaoMi/pegasus-go-client/idl/rrdb" - -// FilterType defines the type of key filtering. -type FilterType int - -// Filter types -const ( - FilterTypeNoFilter = FilterType(rrdb.FilterType_FT_NO_FILTER) - FilterTypeMatchAnywhere = FilterType(rrdb.FilterType_FT_MATCH_ANYWHERE) - FilterTypeMatchPrefix = FilterType(rrdb.FilterType_FT_MATCH_PREFIX) - FilterTypeMatchPostfix = FilterType(rrdb.FilterType_FT_MATCH_POSTFIX) -) - -// Filter is used to filter based on the key. -type Filter struct { - Type FilterType - Pattern []byte -} diff --git a/vendor/github.com/XiaoMi/pegasus-go-client/pegasus/op/check_and_set.go b/vendor/github.com/XiaoMi/pegasus-go-client/pegasus/op/check_and_set.go deleted file mode 100644 index cb742ef..0000000 --- a/vendor/github.com/XiaoMi/pegasus-go-client/pegasus/op/check_and_set.go +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
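Every public call in the deleted client wraps failures into `*PError` (see `WrapError` further down in `table_connector.go`), so callers can branch on which operation failed. A small hedged sketch of that inspection; the wrapped error is constructed by hand here only to keep the snippet self-contained.

```go
package main

import (
	"errors"
	"fmt"

	"github.com/XiaoMi/pegasus-go-client/pegasus"
)

// inspect shows how a caller can recover the failed operation from a client error.
func inspect(err error) {
	var perr *pegasus.PError
	if errors.As(err, &perr) {
		// perr.Op is the OpType of the failed call, perr.Err the underlying cause.
		fmt.Printf("operation %s failed: %v\n", perr.Op, perr.Err)
		if perr.Op == pegasus.OpGet {
			fmt.Println("the GET itself failed; the entry may still exist")
		}
		return
	}
	fmt.Printf("non-pegasus error: %v\n", err)
}

func main() {
	// Simulate a wrapped error the way WrapError in table_connector.go produces it.
	err := &pegasus.PError{Err: errors.New("context deadline exceeded"), Op: pegasus.OpGet}
	inspect(err)
}
```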
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package op - -import ( - "context" - - "github.com/XiaoMi/pegasus-go-client/idl/base" - "github.com/XiaoMi/pegasus-go-client/idl/rrdb" - "github.com/XiaoMi/pegasus-go-client/session" -) - -// CheckAndSet inherits op.Request. -type CheckAndSet struct { - Req *rrdb.CheckAndSetRequest -} - -// CheckAndSetResult is the result of a CAS. -type CheckAndSetResult struct { - SetSucceed bool - CheckValue []byte - CheckValueExist bool - CheckValueReturned bool -} - -// Validate arguments. -func (r *CheckAndSet) Validate() error { - if err := validateHashKey(r.Req.HashKey.Data); err != nil { - return err - } - return nil -} - -// Run operation. -func (r *CheckAndSet) Run(ctx context.Context, gpid *base.Gpid, rs *session.ReplicaSession) (interface{}, error) { - resp, err := rs.CheckAndSet(ctx, gpid, r.Req) - err = wrapRPCFailure(resp, err) - if err == base.TryAgain { - err = nil - } - if err != nil { - return nil, err - } - result := &CheckAndSetResult{ - SetSucceed: resp.Error == 0, - CheckValueReturned: resp.CheckValueReturned, - CheckValueExist: resp.CheckValueReturned && resp.CheckValueExist, - } - if resp.CheckValueReturned && resp.CheckValueExist && resp.CheckValue != nil && resp.CheckValue.Data != nil && len(resp.CheckValue.Data) != 0 { - result.CheckValue = resp.CheckValue.Data - } - return result, nil -} diff --git a/vendor/github.com/XiaoMi/pegasus-go-client/pegasus/op/del.go b/vendor/github.com/XiaoMi/pegasus-go-client/pegasus/op/del.go deleted file mode 100644 index ce34c13..0000000 --- a/vendor/github.com/XiaoMi/pegasus-go-client/pegasus/op/del.go +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package op - -import ( - "context" - - "github.com/XiaoMi/pegasus-go-client/idl/base" - "github.com/XiaoMi/pegasus-go-client/session" -) - -// Del inherits op.Request. -type Del struct { - HashKey []byte - SortKey []byte - - req *base.Blob -} - -// Validate arguments. -func (r *Del) Validate() error { - if err := validateHashKey(r.HashKey); err != nil { - return err - } - if err := validateSortKey(r.SortKey); err != nil { - return err - } - r.req = encodeHashKeySortKey(r.HashKey, r.SortKey) - return nil -} - -// Run operation. 
-func (r *Del) Run(ctx context.Context, gpid *base.Gpid, rs *session.ReplicaSession) (interface{}, error) { - resp, err := rs.Del(ctx, gpid, r.req) - if err := wrapRPCFailure(resp, err); err != nil { - return nil, err - } - return nil, nil -} diff --git a/vendor/github.com/XiaoMi/pegasus-go-client/pegasus/op/get.go b/vendor/github.com/XiaoMi/pegasus-go-client/pegasus/op/get.go deleted file mode 100644 index c2e1ab3..0000000 --- a/vendor/github.com/XiaoMi/pegasus-go-client/pegasus/op/get.go +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package op - -import ( - "context" - - "github.com/XiaoMi/pegasus-go-client/idl/base" - "github.com/XiaoMi/pegasus-go-client/session" -) - -// Get inherits op.Request. -type Get struct { - HashKey []byte - SortKey []byte - - req *base.Blob -} - -// Validate arguments. -func (r *Get) Validate() error { - if err := validateHashKey(r.HashKey); err != nil { - return err - } - if err := validateSortKey(r.SortKey); err != nil { - return err - } - r.req = encodeHashKeySortKey(r.HashKey, r.SortKey) - return nil -} - -// Run operation. -func (r *Get) Run(ctx context.Context, gpid *base.Gpid, rs *session.ReplicaSession) (interface{}, error) { - resp, err := rs.Get(ctx, gpid, r.req) - err = wrapRPCFailure(resp, err) - if err == base.NotFound { - // Success for non-existed entry. - return nil, nil - } - if err != nil { - return nil, err - } - return resp.Value.Data, nil -} diff --git a/vendor/github.com/XiaoMi/pegasus-go-client/pegasus/op/incr.go b/vendor/github.com/XiaoMi/pegasus-go-client/pegasus/op/incr.go deleted file mode 100644 index 75514dc..0000000 --- a/vendor/github.com/XiaoMi/pegasus-go-client/pegasus/op/incr.go +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package op - -import ( - "context" - - "github.com/XiaoMi/pegasus-go-client/idl/base" - "github.com/XiaoMi/pegasus-go-client/idl/rrdb" - "github.com/XiaoMi/pegasus-go-client/session" -) - -// Incr inherits op.Request. 
-type Incr struct { - HashKey []byte - SortKey []byte - Increment int64 - - req *rrdb.IncrRequest -} - -// Validate arguments. -func (r *Incr) Validate() error { - if err := validateHashKey(r.HashKey); err != nil { - return err - } - if err := validateSortKey(r.SortKey); err != nil { - return err - } - - r.req = rrdb.NewIncrRequest() - r.req.Key = encodeHashKeySortKey(r.HashKey, r.SortKey) - r.req.Increment = r.Increment - return nil -} - -// Run operation. -func (r *Incr) Run(ctx context.Context, gpid *base.Gpid, rs *session.ReplicaSession) (interface{}, error) { - resp, err := rs.Incr(ctx, gpid, r.req) - if err := wrapRPCFailure(resp, err); err != nil { - return 0, err - } - return resp.NewValue_, nil -} diff --git a/vendor/github.com/XiaoMi/pegasus-go-client/pegasus/op/multidel.go b/vendor/github.com/XiaoMi/pegasus-go-client/pegasus/op/multidel.go deleted file mode 100644 index 2774df4..0000000 --- a/vendor/github.com/XiaoMi/pegasus-go-client/pegasus/op/multidel.go +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package op - -import ( - "context" - - "github.com/XiaoMi/pegasus-go-client/idl/base" - "github.com/XiaoMi/pegasus-go-client/idl/rrdb" - "github.com/XiaoMi/pegasus-go-client/session" -) - -// MultiDel inherits op.Request. -type MultiDel struct { - HashKey []byte - SortKeys [][]byte - - req *rrdb.MultiRemoveRequest -} - -// Validate arguments. -func (r *MultiDel) Validate() error { - if err := validateHashKey(r.HashKey); err != nil { - return err - } - if err := validateSortKeys(r.SortKeys); err != nil { - return err - } - - r.req = rrdb.NewMultiRemoveRequest() - r.req.HashKey = &base.Blob{Data: r.HashKey} - r.req.SorkKeys = make([]*base.Blob, len(r.SortKeys)) - for i, sortKey := range r.SortKeys { - r.req.SorkKeys[i] = &base.Blob{Data: sortKey} - } - return nil -} - -// Run operation. -func (r *MultiDel) Run(ctx context.Context, gpid *base.Gpid, rs *session.ReplicaSession) (interface{}, error) { - resp, err := rs.MultiDelete(ctx, gpid, r.req) - if err := wrapRPCFailure(resp, err); err != nil { - return nil, err - } - return nil, nil -} diff --git a/vendor/github.com/XiaoMi/pegasus-go-client/pegasus/op/multiget.go b/vendor/github.com/XiaoMi/pegasus-go-client/pegasus/op/multiget.go deleted file mode 100644 index b36819a..0000000 --- a/vendor/github.com/XiaoMi/pegasus-go-client/pegasus/op/multiget.go +++ /dev/null @@ -1,89 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
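The deleted `op/incr.go` backs the atomic counter API (`TableConnector.Incr`, declared further down): the value under a hash/sort key is incremented server-side and the new value is returned. A hedged usage sketch; `table` is assumed to be an open `TableConnector` as in the earlier client sketch, and the `"pageviews"` hash key and page-path sort key are illustrative choices.

```go
package example

import (
	"context"

	"github.com/XiaoMi/pegasus-go-client/pegasus"
)

// bumpPageViews atomically increments a per-page counter and returns the
// updated count, using the Incr primitive implemented by the deleted op/incr.go.
func bumpPageViews(ctx context.Context, table pegasus.TableConnector, page string) (int64, error) {
	// The hash key spreads counters across partitions; the sort key names the counter.
	return table.Incr(ctx, []byte("pageviews"), []byte(page), 1)
}
```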
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package op - -import ( - "context" - - "github.com/XiaoMi/pegasus-go-client/idl/base" - "github.com/XiaoMi/pegasus-go-client/idl/rrdb" - "github.com/XiaoMi/pegasus-go-client/session" -) - -// MultiGet inherits op.Request. -type MultiGet struct { - HashKey []byte - SortKeys [][]byte - StartSortkey []byte - StopSortkey []byte - - Req *rrdb.MultiGetRequest -} - -// MultiGetResult -type MultiGetResult struct { - KVs []*rrdb.KeyValue - AllFetched bool -} - -// Validate arguments. -func (r *MultiGet) Validate() error { - if err := validateHashKey(r.HashKey); err != nil { - return err - } - if len(r.SortKeys) != 0 { - // sortKeys are nil-able, nil means fetching all entries. - if err := validateSortKeys(r.SortKeys); err != nil { - return err - } - r.Req.SorkKeys = make([]*base.Blob, len(r.SortKeys)) - for i, sortKey := range r.SortKeys { - r.Req.SorkKeys[i] = &base.Blob{Data: sortKey} - } - } - r.Req.HashKey = &base.Blob{Data: r.HashKey} - if r.StartSortkey == nil { - r.Req.StartSortkey = &base.Blob{} - } else { - r.Req.StartSortkey = &base.Blob{Data: r.StartSortkey} - } - if r.StopSortkey == nil { - r.Req.StopSortkey = &base.Blob{} - } else { - r.Req.StopSortkey = &base.Blob{Data: r.StopSortkey} - } - return nil -} - -// Run operation. -func (r *MultiGet) Run(ctx context.Context, gpid *base.Gpid, rs *session.ReplicaSession) (interface{}, error) { - resp, err := rs.MultiGet(ctx, gpid, r.Req) - err = wrapRPCFailure(resp, err) - allFetched := true - if err == base.Incomplete { - // partial data is fetched - allFetched = false - err = nil - } - if err != nil { - return nil, err - } - return &MultiGetResult{KVs: resp.Kvs, AllFetched: allFetched}, nil -} diff --git a/vendor/github.com/XiaoMi/pegasus-go-client/pegasus/op/multiset.go b/vendor/github.com/XiaoMi/pegasus-go-client/pegasus/op/multiset.go deleted file mode 100644 index aa16259..0000000 --- a/vendor/github.com/XiaoMi/pegasus-go-client/pegasus/op/multiset.go +++ /dev/null @@ -1,81 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package op - -import ( - "context" - "fmt" - "time" - - "github.com/XiaoMi/pegasus-go-client/idl/base" - "github.com/XiaoMi/pegasus-go-client/idl/rrdb" - "github.com/XiaoMi/pegasus-go-client/session" -) - -// MultiSet inherits op.Request. -type MultiSet struct { - HashKey []byte - SortKeys [][]byte - Values [][]byte - TTL time.Duration - - req *rrdb.MultiPutRequest -} - -// Validate arguments. -func (r *MultiSet) Validate() error { - if err := validateHashKey(r.HashKey); err != nil { - return err - } - if err := validateSortKeys(r.SortKeys); err != nil { - return err - } - if err := validateValues(r.Values); err != nil { - return err - } - if len(r.SortKeys) != len(r.Values) { - return fmt.Errorf("InvalidParameter: unmatched key-value pairs: len(sortKeys)=%d len(values)=%d", - len(r.SortKeys), len(r.Values)) - } - - r.req = rrdb.NewMultiPutRequest() - r.req.HashKey = &base.Blob{Data: r.HashKey} - r.req.Kvs = make([]*rrdb.KeyValue, len(r.SortKeys)) - for i := 0; i < len(r.SortKeys); i++ { - r.req.Kvs[i] = &rrdb.KeyValue{ - Key: &base.Blob{Data: r.SortKeys[i]}, - Value: &base.Blob{Data: r.Values[i]}, - } - } - r.req.ExpireTsSeconds = 0 - if r.TTL != 0 { - r.req.ExpireTsSeconds = expireTsSeconds(r.TTL) - } - return nil -} - -// Run operation. -func (r *MultiSet) Run(ctx context.Context, gpid *base.Gpid, rs *session.ReplicaSession) (interface{}, error) { - resp, err := rs.MultiSet(ctx, gpid, r.req) - if err := wrapRPCFailure(resp, err); err != nil { - return nil, err - } - return nil, nil -} diff --git a/vendor/github.com/XiaoMi/pegasus-go-client/pegasus/op/op.go b/vendor/github.com/XiaoMi/pegasus-go-client/pegasus/op/op.go deleted file mode 100644 index a05e224..0000000 --- a/vendor/github.com/XiaoMi/pegasus-go-client/pegasus/op/op.go +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package op - -import ( - "context" - - "github.com/XiaoMi/pegasus-go-client/idl/base" - "github.com/XiaoMi/pegasus-go-client/session" -) - -// Request is the generic interface of all Pegasus operations. -type Request interface { - Validate() error - - Run(ctx context.Context, gpid *base.Gpid, rs *session.ReplicaSession) (interface{}, error) -} diff --git a/vendor/github.com/XiaoMi/pegasus-go-client/pegasus/op/set.go b/vendor/github.com/XiaoMi/pegasus-go-client/pegasus/op/set.go deleted file mode 100644 index 8564bfa..0000000 --- a/vendor/github.com/XiaoMi/pegasus-go-client/pegasus/op/set.go +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
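The small `op.Request` interface deleted just above is the pattern the whole client is built on: each operation validates its arguments locally, then runs against a replica session, and `runPartitionOp` in `table_connector.go` (further down) drives every operation through that pair generically. As a rough, self-contained analogue of that design only: the pegasus-specific `Gpid` and `ReplicaSession` types are replaced by a plain string session, and `getOp` is a toy stand-in, not the library's `op.Get`.

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

// request mirrors the shape of the deleted op.Request interface:
// validate locally first, then execute against a session.
type request interface {
	Validate() error
	Run(ctx context.Context, session string) (interface{}, error)
}

// getOp is a toy stand-in for an operation such as op.Get.
type getOp struct {
	HashKey []byte
	SortKey []byte
}

func (g *getOp) Validate() error {
	if len(g.HashKey) == 0 {
		return errors.New("InvalidParameter: hashkey must not be empty")
	}
	return nil
}

func (g *getOp) Run(ctx context.Context, session string) (interface{}, error) {
	// A real implementation would issue the RPC on a replica session here.
	return fmt.Sprintf("value-of-%s@%s", g.HashKey, session), nil
}

// runOp is the generic driver, analogous to runPartitionOp: validate, then run.
func runOp(ctx context.Context, req request, session string) (interface{}, error) {
	if err := req.Validate(); err != nil {
		return nil, err
	}
	return req.Run(ctx, session)
}

func main() {
	res, err := runOp(context.Background(), &getOp{HashKey: []byte("h"), SortKey: []byte("s")}, "replica-1")
	fmt.Println(res, err)
}
```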
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package op - -import ( - "context" - "time" - - "github.com/XiaoMi/pegasus-go-client/idl/base" - "github.com/XiaoMi/pegasus-go-client/idl/rrdb" - "github.com/XiaoMi/pegasus-go-client/session" -) - -// Set inherits op.Request. -type Set struct { - HashKey []byte - SortKey []byte - Value []byte - TTL time.Duration - - req *rrdb.UpdateRequest -} - -// Validate arguments. -func (r *Set) Validate() error { - if err := validateHashKey(r.HashKey); err != nil { - return err - } - if err := validateSortKey(r.SortKey); err != nil { - return err - } - if err := validateValue(r.Value); err != nil { - return err - } - - key := encodeHashKeySortKey(r.HashKey, r.SortKey) - val := &base.Blob{Data: r.Value} - expireTsSec := int32(0) - if r.TTL != 0 { - expireTsSec = expireTsSeconds(r.TTL) - } - r.req = &rrdb.UpdateRequest{Key: key, Value: val, ExpireTsSeconds: expireTsSec} - return nil -} - -// Run operation. -func (r *Set) Run(ctx context.Context, gpid *base.Gpid, rs *session.ReplicaSession) (interface{}, error) { - resp, err := rs.Put(ctx, gpid, r.req) - if err := wrapRPCFailure(resp, err); err != nil { - return 0, err - } - return nil, nil -} diff --git a/vendor/github.com/XiaoMi/pegasus-go-client/pegasus/op/sortkey_count.go b/vendor/github.com/XiaoMi/pegasus-go-client/pegasus/op/sortkey_count.go deleted file mode 100644 index 9a49332..0000000 --- a/vendor/github.com/XiaoMi/pegasus-go-client/pegasus/op/sortkey_count.go +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package op - -import ( - "context" - - "github.com/XiaoMi/pegasus-go-client/idl/base" - "github.com/XiaoMi/pegasus-go-client/session" -) - -// SortKeyCount inherits op.Request. -type SortKeyCount struct { - HashKey []byte - - req *base.Blob -} - -// Validate arguments. -func (r *SortKeyCount) Validate() error { - if err := validateHashKey(r.HashKey); err != nil { - return err - } - r.req = &base.Blob{Data: r.HashKey} - return nil -} - -// Run operation. 
-func (r *SortKeyCount) Run(ctx context.Context, gpid *base.Gpid, rs *session.ReplicaSession) (interface{}, error) { - resp, err := rs.SortKeyCount(ctx, gpid, r.req) - if err = wrapRPCFailure(resp, err); err != nil { - return nil, err - } - return resp.Count, nil -} diff --git a/vendor/github.com/XiaoMi/pegasus-go-client/pegasus/op/ttl.go b/vendor/github.com/XiaoMi/pegasus-go-client/pegasus/op/ttl.go deleted file mode 100644 index e76766d..0000000 --- a/vendor/github.com/XiaoMi/pegasus-go-client/pegasus/op/ttl.go +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package op - -import ( - "context" - - "github.com/XiaoMi/pegasus-go-client/idl/base" - "github.com/XiaoMi/pegasus-go-client/session" -) - -// TTL inherits op.Request. -type TTL struct { - HashKey []byte - SortKey []byte - - req *base.Blob -} - -// Validate arguments. -func (r *TTL) Validate() error { - if err := validateHashKey(r.HashKey); err != nil { - return err - } - if err := validateSortKey(r.SortKey); err != nil { - return err - } - r.req = encodeHashKeySortKey(r.HashKey, r.SortKey) - return nil -} - -// Run operation. -// Returns -2 if entry doesn't exist. -func (r *TTL) Run(ctx context.Context, gpid *base.Gpid, rs *session.ReplicaSession) (interface{}, error) { - resp, err := rs.TTL(ctx, gpid, r.req) - err = wrapRPCFailure(resp, err) - if err == base.NotFound { - // Success for non-existed entry. - return -2, nil - } - if err != nil { - return -2, err - } - return int(resp.GetTTLSeconds()), nil -} diff --git a/vendor/github.com/XiaoMi/pegasus-go-client/pegasus/op/utils.go b/vendor/github.com/XiaoMi/pegasus-go-client/pegasus/op/utils.go deleted file mode 100644 index 9c3f26a..0000000 --- a/vendor/github.com/XiaoMi/pegasus-go-client/pegasus/op/utils.go +++ /dev/null @@ -1,130 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
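The deleted `op/ttl.go` (and the matching `TTL` method on `TableConnector` below) encodes existence in the returned integer: `-2` means the entry does not exist, `-1` means it exists without an expiry, and any non-negative value is the remaining time-to-live in seconds. A small helper showing how callers typically interpret it; the real call site is indicated in the comment.

```go
package main

import "fmt"

// describeTTL interprets the integer returned by TableConnector.TTL /
// the deleted op.TTL: -2 = missing entry, -1 = no expiry, >= 0 = seconds left.
func describeTTL(ttl int) string {
	switch {
	case ttl == -2:
		return "entry does not exist"
	case ttl == -1:
		return "entry exists and never expires"
	default:
		return fmt.Sprintf("entry expires in %d seconds", ttl)
	}
}

func main() {
	// In real code: ttl, err := table.TTL(ctx, []byte("hash"), []byte("sort"))
	for _, ttl := range []int{-2, -1, 300} {
		fmt.Println(ttl, "=>", describeTTL(ttl))
	}
}
```

Note that the client's own `Exist` (further down in `table_connector.go`) is built on exactly this convention: it reports `false` when `TTL` returns `-2`.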
- */ - -package op - -import ( - "encoding/binary" - "fmt" - "math" - "time" - - "github.com/XiaoMi/pegasus-go-client/idl/base" -) - -func validateHashKey(hashKey []byte) error { - if hashKey == nil { - return fmt.Errorf("InvalidParameter: hashkey must not be nil") - } - if len(hashKey) == 0 { - return fmt.Errorf("InvalidParameter: hashkey must not be empty") - } - if len(hashKey) > math.MaxUint16 { - return fmt.Errorf("InvalidParameter: length of hashkey (%d) must be less than %d", len(hashKey), math.MaxUint16) - } - return nil -} - -func validateValue(value []byte) error { - if value == nil { - return fmt.Errorf("InvalidParameter: value must not be nil") - } - return nil -} - -func validateValues(values [][]byte) error { - if values == nil { - return fmt.Errorf("InvalidParameter: values must not be nil") - } - if len(values) == 0 { - return fmt.Errorf("InvalidParameter: values must not be empty") - } - for i, value := range values { - if value == nil { - return fmt.Errorf("InvalidParameter: values[%d] must not be nil", i) - } - } - return nil -} - -func validateSortKey(sortKey []byte) error { - if sortKey == nil { - return fmt.Errorf("InvalidParameter: sortkey must not be nil") - } - return nil -} - -func validateSortKeys(sortKeys [][]byte) error { - if sortKeys == nil { - return fmt.Errorf("InvalidParameter: sortkeys must not be nil") - } - if len(sortKeys) == 0 { - return fmt.Errorf("InvalidParameter: sortkeys must not be empty") - } - for i, sortKey := range sortKeys { - if sortKey == nil { - return fmt.Errorf("InvalidParameter: sortkeys[%d] must not be nil", i) - } - } - return nil -} - -func encodeHashKeySortKey(hashKey []byte, sortKey []byte) *base.Blob { - hashKeyLen := len(hashKey) - sortKeyLen := len(sortKey) - - blob := &base.Blob{ - Data: make([]byte, 2+hashKeyLen+sortKeyLen), - } - - binary.BigEndian.PutUint16(blob.Data, uint16(hashKeyLen)) - - if hashKeyLen > 0 { - copy(blob.Data[2:], hashKey) - } - - if sortKeyLen > 0 { - copy(blob.Data[2+hashKeyLen:], sortKey) - } - - return blob -} - -func expireTsSeconds(ttl time.Duration) int32 { - if ttl == 0 { - return 0 - } - // 1451606400 means seconds since 2016.01.01-00:00:00 GMT - return int32(ttl.Seconds()) + int32(time.Now().Unix()-1451606400) -} - -type rpcResponse interface { - GetError() int32 -} - -func wrapRPCFailure(resp rpcResponse, err error) error { - if err != nil { - return err - } - err = base.NewRocksDBErrFromInt(resp.GetError()) - if err != nil { - return err - } - return nil -} diff --git a/vendor/github.com/XiaoMi/pegasus-go-client/pegasus/retry_failover.go b/vendor/github.com/XiaoMi/pegasus-go-client/pegasus/retry_failover.go deleted file mode 100644 index a3be148..0000000 --- a/vendor/github.com/XiaoMi/pegasus-go-client/pegasus/retry_failover.go +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
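Two low-level details from the deleted `op/utils.go` are worth keeping on record for any future non-vendored integration: keys are encoded as a 2-byte big-endian hash-key length followed by the hash key and then the sort key (which is why `validateHashKey` caps a hash key at 65535 bytes), and TTLs are converted to absolute expiry timestamps counted in seconds since 2016-01-01 00:00:00 GMT (offset 1451606400). A self-contained sketch of both, written from the code above rather than from any wire specification; the decode helper mirrors `restoreSortKeyHashKey` from `scanner.go` in simplified form.

```go
package main

import (
	"encoding/binary"
	"fmt"
	"time"
)

// encodeKey reproduces encodeHashKeySortKey from op/utils.go:
// [2-byte big-endian len(hashKey)] [hashKey] [sortKey].
func encodeKey(hashKey, sortKey []byte) []byte {
	buf := make([]byte, 2+len(hashKey)+len(sortKey))
	binary.BigEndian.PutUint16(buf, uint16(len(hashKey)))
	copy(buf[2:], hashKey)
	copy(buf[2+len(hashKey):], sortKey)
	return buf
}

// decodeKey is the inverse, a simplified mirror of restoreSortKeyHashKey in scanner.go.
func decodeKey(key []byte) (hashKey, sortKey []byte, err error) {
	if len(key) < 2 {
		return nil, nil, fmt.Errorf("key too short: %q", key)
	}
	n := int(binary.BigEndian.Uint16(key[:2]))
	if 2+n > len(key) {
		return nil, nil, fmt.Errorf("hash key length %d exceeds key size", n)
	}
	return key[2 : 2+n], key[2+n:], nil
}

// expireTs converts a TTL into the absolute expiry used on the wire:
// seconds since 2016-01-01 00:00:00 GMT (Unix timestamp 1451606400), 0 meaning no expiry.
func expireTs(ttl time.Duration) int32 {
	if ttl == 0 {
		return 0
	}
	return int32(ttl.Seconds()) + int32(time.Now().Unix()-1451606400)
}

func main() {
	key := encodeKey([]byte("user:42"), []byte("profile"))
	h, s, _ := decodeKey(key)
	fmt.Printf("round trip: hash=%q sort=%q\n", h, s)
	fmt.Println("expiry timestamp for 10m TTL:", expireTs(10*time.Minute))
}
```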
See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package pegasus - -import ( - "context" - "time" - - "github.com/cenkalti/backoff/v4" -) - -type tableRPCOp func() (confUpdated bool, result interface{}, err error) - -// retryFailOver retries the operation when it encounters replica fail-over, until context reaches deadline. -func retryFailOver(ctx context.Context, op tableRPCOp) (interface{}, error) { - bf := backoff.NewExponentialBackOff() - bf.InitialInterval = time.Second - bf.Multiplier = 2 - for { - confUpdated, res, err := op() - backoffCh := time.After(bf.NextBackOff()) - if confUpdated { // must fail - select { - case <-backoffCh: - continue - case <-ctx.Done(): - err = ctx.Err() - break - } - } - return res, err - } -} diff --git a/vendor/github.com/XiaoMi/pegasus-go-client/pegasus/scanner.go b/vendor/github.com/XiaoMi/pegasus-go-client/pegasus/scanner.go deleted file mode 100644 index 9faa1b6..0000000 --- a/vendor/github.com/XiaoMi/pegasus-go-client/pegasus/scanner.go +++ /dev/null @@ -1,287 +0,0 @@ -// Copyright (c) 2017, Xiaomi, Inc. All rights reserved. -// This source code is licensed under the Apache License Version 2.0, which -// can be found in the LICENSE file in the root directory of this source tree. - -package pegasus - -import ( - "context" - "encoding/binary" - "fmt" - "sync/atomic" - "time" - - "github.com/XiaoMi/pegasus-go-client/idl/base" - "github.com/XiaoMi/pegasus-go-client/idl/rrdb" - "github.com/XiaoMi/pegasus-go-client/pegalog" -) - -// ScannerOptions is the options for GetScanner and GetUnorderedScanners. -type ScannerOptions struct { - BatchSize int // internal buffer batch size - StartInclusive bool // if the startSortKey is included - StopInclusive bool // if the stopSortKey is included - HashKeyFilter Filter - SortKeyFilter Filter - NoValue bool // only fetch hash_key and sort_key, but not fetch value -} - -const ( - batchScanning = 0 - batchScanFinished = -1 // Scanner's batch is finished, clean up it and switch to the status batchEmpty - batchEmpty = -2 - batchError = -3 -) - -// Scanner defines the interface of client-side scanning. -type Scanner interface { - // Grabs the next entry. - Next(ctx context.Context) (completed bool, hashKey []byte, sortKey []byte, value []byte, err error) - - Close() error -} - -type pegasusScanner struct { - table *pegasusTableConnector - startKey *base.Blob - stopKey *base.Blob - options *ScannerOptions - - gpidSlice []*base.Gpid - gpidIndex int // index of gpidSlice[] - curGpid *base.Gpid // current gpid - - batchEntries []*KeyValue - batchIndex int - batchStatus int64 - - isNextRunning atomic.Value - - closed bool - logger pegalog.Logger -} - -// NewScanOptions returns the default ScannerOptions. 
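The deleted `retry_failover.go` is the fail-over policy every table operation goes through: retry with exponential backoff (1s initial interval, doubling) only while the routing table was found stale (`confUpdated`), and stop as soon as the context expires. A rough standalone sketch of the same loop shape, using the same `cenkalti/backoff/v4` dependency; the `errStaleConfig` sentinel is an illustrative stand-in for the client's internal signal, not part of the library.

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"

	"github.com/cenkalti/backoff/v4"
)

// errStaleConfig stands in for "the replica configuration was updated, retry the call".
var errStaleConfig = errors.New("stale routing table")

// retryOnStaleConfig mirrors the structure of the deleted retryFailOver:
// retry only when the failure indicates a stale configuration, back off
// exponentially, and give up once the context is done.
func retryOnStaleConfig(ctx context.Context, op func() error) error {
	bf := backoff.NewExponentialBackOff()
	bf.InitialInterval = time.Second
	bf.Multiplier = 2
	for {
		err := op()
		if !errors.Is(err, errStaleConfig) {
			return err // success, or a non-retryable failure
		}
		select {
		case <-time.After(bf.NextBackOff()):
			continue
		case <-ctx.Done():
			return ctx.Err()
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	attempts := 0
	err := retryOnStaleConfig(ctx, func() error {
		attempts++
		if attempts < 3 {
			return errStaleConfig // pretend the first two attempts hit a stale config
		}
		return nil
	})
	fmt.Println("attempts:", attempts, "err:", err)
}
```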
-func NewScanOptions() *ScannerOptions { - return &ScannerOptions{ - BatchSize: 1000, - StartInclusive: true, - StopInclusive: false, - HashKeyFilter: Filter{Type: FilterTypeNoFilter, Pattern: nil}, - SortKeyFilter: Filter{Type: FilterTypeNoFilter, Pattern: nil}, - NoValue: false, - } -} - -func newPegasusScannerImpl(table *pegasusTableConnector, gpidSlice []*base.Gpid, options *ScannerOptions, - startKey *base.Blob, stopKey *base.Blob) Scanner { - scanner := &pegasusScanner{ - table: table, - gpidSlice: gpidSlice, - options: options, - startKey: startKey, - stopKey: stopKey, - batchIndex: -1, - batchStatus: batchScanFinished, - gpidIndex: len(gpidSlice), - batchEntries: make([]*KeyValue, 0), - closed: false, - logger: pegalog.GetLogger(), - } - scanner.isNextRunning.Store(0) - return scanner -} - -func newPegasusScanner(table *pegasusTableConnector, gpid *base.Gpid, options *ScannerOptions, - startKey *base.Blob, stopKey *base.Blob) Scanner { - gpidSlice := []*base.Gpid{gpid} - return newPegasusScannerImpl(table, gpidSlice, options, startKey, stopKey) -} - -func newPegasusScannerForUnorderedScanners(table *pegasusTableConnector, gpidSlice []*base.Gpid, - options *ScannerOptions) Scanner { - options.StartInclusive = true - options.StopInclusive = false - return newPegasusScannerImpl(table, gpidSlice, options, &base.Blob{Data: []byte{0x00, 0x00}}, - &base.Blob{Data: []byte{0xFF, 0xFF}}) -} - -func (p *pegasusScanner) Next(ctx context.Context) (completed bool, hashKey []byte, - sortKey []byte, value []byte, err error) { - if p.batchStatus == batchError { - err = fmt.Errorf("last Next() failed") - return - } - if p.closed { - err = fmt.Errorf("scanner is closed") - return - } - - completed, hashKey, sortKey, value, err = func() (completed bool, hashKey []byte, sortKey []byte, value []byte, err error) { - // TODO(tangyanzhao): This method is not thread safe, should use r/w lock - // Prevent two concurrent calls on Next of the same Scanner. 
- if p.isNextRunning.Load() != 0 { - err = fmt.Errorf("there can be no concurrent calls on Next of the same Scanner") - return - } - p.isNextRunning.Store(1) - defer p.isNextRunning.Store(0) - return p.doNext(ctx) - }() - - if err != nil { - p.batchStatus = batchError - } - - err = WrapError(err, OpNext) - return -} - -func (p *pegasusScanner) doNext(ctx context.Context) (completed bool, hashKey []byte, - sortKey []byte, value []byte, err error) { - // until we have the valid batch - for p.batchIndex++; p.batchIndex >= len(p.batchEntries); p.batchIndex++ { - if p.batchStatus == batchScanFinished { - if p.gpidIndex <= 0 { - completed = true - p.logger.Print(" Scanning on all partitions has been completed") - return - } - p.gpidIndex-- - p.curGpid = p.gpidSlice[p.gpidIndex] - p.batchClear() - } else if p.batchStatus == batchEmpty { - return p.startScanPartition(ctx) - } else { - // request nextBatch - return p.nextBatch(ctx) - } - } - // batch.SortKey= - hashKey, sortKey, err = restoreSortKeyHashKey(p.batchEntries[p.batchIndex].SortKey) - value = p.batchEntries[p.batchIndex].Value - return -} - -func (p *pegasusScanner) batchClear() { - p.batchEntries = make([]*KeyValue, 0) - p.batchIndex = -1 - p.batchStatus = batchEmpty -} - -func (p *pegasusScanner) startScanPartition(ctx context.Context) (completed bool, hashKey []byte, - sortKey []byte, value []byte, err error) { - request := rrdb.NewGetScannerRequest() - if len(p.batchEntries) == 0 { - request.StartKey = p.startKey - request.StartInclusive = p.options.StartInclusive - } else { - request.StartKey = &base.Blob{Data: p.batchEntries[len(p.batchEntries)-1].SortKey} - request.StartInclusive = false - } - request.StopKey = p.stopKey - request.StopInclusive = p.options.StopInclusive - request.BatchSize = int32(p.options.BatchSize) - request.NoValue = p.options.NoValue - - request.HashKeyFilterType = rrdb.FilterType(p.options.HashKeyFilter.Type) - request.HashKeyFilterPattern = &base.Blob{} - if p.options.HashKeyFilter.Pattern != nil { - request.HashKeyFilterPattern.Data = p.options.HashKeyFilter.Pattern - } - - request.SortKeyFilterType = rrdb.FilterType(p.options.SortKeyFilter.Type) - request.SortKeyFilterPattern = &base.Blob{} - if p.options.SortKeyFilter.Pattern != nil { - request.SortKeyFilterPattern.Data = p.options.SortKeyFilter.Pattern - } - - part := p.table.getPartitionByGpid(p.curGpid) - response, err := part.GetScanner(ctx, p.curGpid, request) - - err = p.onRecvScanResponse(response, err) - if err == nil { - return p.doNext(ctx) - } - - return -} - -func (p *pegasusScanner) nextBatch(ctx context.Context) (completed bool, hashKey []byte, - sortKey []byte, value []byte, err error) { - request := &rrdb.ScanRequest{ContextID: p.batchStatus} - part := p.table.getPartitionByGpid(p.curGpid) - response, err := part.Scan(ctx, p.curGpid, request) - err = p.onRecvScanResponse(response, err) - if err == nil { - return p.doNext(ctx) - } - - return -} - -func (p *pegasusScanner) onRecvScanResponse(response *rrdb.ScanResponse, err error) error { - if err == nil { - if response.Error == 0 { - // ERR_OK - p.batchEntries = make([]*KeyValue, len(response.Kvs)) - for i := 0; i < len(response.Kvs); i++ { - p.batchEntries[i] = &KeyValue{ - SortKey: response.Kvs[i].Key.Data, // batch.SortKey= - Value: response.Kvs[i].Value.Data, - } - } - - p.batchIndex = -1 - p.batchStatus = response.ContextID - } else if response.Error == 1 { - // scan context has been removed - p.batchStatus = batchEmpty - } else { - // rpc succeed, but operation encounter some error 
in server side - return base.NewRocksDBErrFromInt(response.Error) - } - } else { - // rpc failed - return fmt.Errorf("scan failed with error:" + err.Error()) - } - - return nil -} - -func (p *pegasusScanner) Close() error { - var err error - - // try to close in 100ms, - ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond*100) - defer cancel() - - // if batchScanFinished or batchEmpty, server side will clear scanner automatically - // if not, clear scanner manually - if p.batchStatus >= batchScanning { - part := p.table.getPartitionByGpid(p.curGpid) - err = part.ClearScanner(ctx, p.curGpid, p.batchStatus) - if err == nil { - p.batchStatus = batchScanFinished - } - } - - p.gpidIndex = 0 - p.closed = true - return WrapError(err, OpScannerClose) -} - -func restoreSortKeyHashKey(key []byte) (hashKey []byte, sortKey []byte, err error) { - if key == nil || len(key) < 2 { - return nil, nil, fmt.Errorf("unable to restore key: %s", key) - } - - hashKeyLen := 0xFFFF & binary.BigEndian.Uint16(key[:2]) - if hashKeyLen != 0xFFFF && int(2+hashKeyLen) <= len(key) { - hashKey = key[2 : 2+hashKeyLen] - sortKey = key[2+hashKeyLen:] - return hashKey, sortKey, nil - } - - return nil, nil, fmt.Errorf("unable to restore key, hashKey length invalid") -} diff --git a/vendor/github.com/XiaoMi/pegasus-go-client/pegasus/table_connector.go b/vendor/github.com/XiaoMi/pegasus-go-client/pegasus/table_connector.go deleted file mode 100644 index dbf1533..0000000 --- a/vendor/github.com/XiaoMi/pegasus-go-client/pegasus/table_connector.go +++ /dev/null @@ -1,768 +0,0 @@ -// Copyright (c) 2017, Xiaomi, Inc. All rights reserved. -// This source code is licensed under the Apache License Version 2.0, which -// can be found in the LICENSE file in the root directory of this source tree. - -package pegasus - -import ( - "bytes" - "context" - "encoding/binary" - "errors" - "fmt" - "math" - "sync" - "time" - - "github.com/XiaoMi/pegasus-go-client/idl/base" - "github.com/XiaoMi/pegasus-go-client/idl/replication" - "github.com/XiaoMi/pegasus-go-client/idl/rrdb" - "github.com/XiaoMi/pegasus-go-client/pegalog" - "github.com/XiaoMi/pegasus-go-client/pegasus/op" - "github.com/XiaoMi/pegasus-go-client/session" - "gopkg.in/tomb.v2" - kerrors "k8s.io/apimachinery/pkg/util/errors" -) - -// KeyValue is the returned type of MultiGet and MultiGetRange. -type KeyValue struct { - SortKey, Value []byte -} - -// CompositeKey is a composition of HashKey and SortKey. -type CompositeKey struct { - HashKey, SortKey []byte -} - -// MultiGetOptions is the options for MultiGet and MultiGetRange, defaults to DefaultMultiGetOptions. -type MultiGetOptions struct { - StartInclusive bool - StopInclusive bool - SortKeyFilter Filter - - // MaxFetchCount and MaxFetchSize limit the size of returned result. - - // Max count of k-v pairs to be fetched. MaxFetchCount <= 0 means no limit. - MaxFetchCount int - - // Max size of k-v pairs to be fetched. MaxFetchSize <= 0 means no limit. - MaxFetchSize int - - // Query order - Reverse bool - - // Whether to retrieve keys only, without value. - // Enabling this option will reduce the network load, improve the RPC latency. - NoValue bool -} - -// DefaultMultiGetOptions defines the defaults of MultiGetOptions. 
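For reviewers who used the vendored scanner API directly: a hedged sketch of a full-hash-key scan with the `Scanner` interface deleted above, assuming `table` is an open `TableConnector` obtained as in the earlier client sketch. Nil start and stop sort keys scan the whole hash key, and `NewScanOptions` supplies the defaults shown above (batch size 1000, start inclusive, stop exclusive).

```go
package example

import (
	"context"
	"fmt"

	"github.com/XiaoMi/pegasus-go-client/pegasus"
)

// scanHashKey walks every sort key stored under hashKey using the scanner
// interface from the deleted scanner.go.
func scanHashKey(ctx context.Context, table pegasus.TableConnector, hashKey []byte) error {
	scanner, err := table.GetScanner(ctx, hashKey, nil, nil, pegasus.NewScanOptions())
	if err != nil {
		return err
	}
	defer scanner.Close()

	for {
		// completed becomes true once all matching entries have been returned.
		completed, hk, sk, value, err := scanner.Next(ctx)
		if err != nil {
			return err
		}
		if completed {
			return nil
		}
		fmt.Printf("hash=%q sort=%q value=%q\n", hk, sk, value)
	}
}
```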
-var DefaultMultiGetOptions = &MultiGetOptions{ - StartInclusive: true, - StopInclusive: false, - SortKeyFilter: Filter{ - Type: FilterTypeNoFilter, - Pattern: nil, - }, - MaxFetchCount: 100, - MaxFetchSize: 100000, - NoValue: false, -} - -// TableConnector is used to communicate with single Pegasus table. -type TableConnector interface { - // Get retrieves the entry for `hashKey` + `sortKey`. - // Returns nil if no entry matches. - // `hashKey` : CAN'T be nil or empty. - // `sortKey` : CAN'T be nil but CAN be empty. - Get(ctx context.Context, hashKey []byte, sortKey []byte) ([]byte, error) - - // Set the entry for `hashKey` + `sortKey` to `value`. - // If Set is called or `ttl` == 0, no data expiration is specified. - // `hashKey` : CAN'T be nil or empty. - // `sortKey` / `value` : CAN'T be nil but CAN be empty. - Set(ctx context.Context, hashKey []byte, sortKey []byte, value []byte) error - SetTTL(ctx context.Context, hashKey []byte, sortKey []byte, value []byte, ttl time.Duration) error - - // Delete the entry for `hashKey` + `sortKey`. - // `hashKey` : CAN'T be nil or empty. - // `sortKey` : CAN'T be nil but CAN be empty. - Del(ctx context.Context, hashKey []byte, sortKey []byte) error - - // MultiGet/MultiGetOpt retrieves the multiple entries for `hashKey` + `sortKeys[i]` atomically in one operation. - // MultiGet is identical to MultiGetOpt except that the former uses DefaultMultiGetOptions as `options`. - // - // If `sortKeys` are given empty or nil, all entries under `hashKey` will be retrieved. - // `hashKey` : CAN'T be nil or empty. - // `sortKeys[i]` : CAN'T be nil but CAN be empty. - // - // The returned key-value pairs are sorted by sort key in ascending order. - // Returns nil if no entries match. - // Returns true if all data is fetched, false if only partial data is fetched. - // - MultiGet(ctx context.Context, hashKey []byte, sortKeys [][]byte) ([]*KeyValue, bool, error) - MultiGetOpt(ctx context.Context, hashKey []byte, sortKeys [][]byte, options *MultiGetOptions) ([]*KeyValue, bool, error) - - // MultiGetRange retrieves the multiple entries under `hashKey`, between range (`startSortKey`, `stopSortKey`), - // atomically in one operation. - // - // startSortKey: nil or len(startSortKey) == 0 means start from begin. - // stopSortKey: nil or len(stopSortKey) == 0 means stop to end. - // `hashKey` : CAN'T be nil. - // - // The returned key-value pairs are sorted by sort keys in ascending order. - // Returns nil if no entries match. - // Returns true if all data is fetched, false if only partial data is fetched. - // - MultiGetRange(ctx context.Context, hashKey []byte, startSortKey []byte, stopSortKey []byte) ([]*KeyValue, bool, error) - MultiGetRangeOpt(ctx context.Context, hashKey []byte, startSortKey []byte, stopSortKey []byte, options *MultiGetOptions) ([]*KeyValue, bool, error) - - // MultiSet sets the multiple entries for `hashKey` + `sortKeys[i]` atomically in one operation. - // `hashKey` / `sortKeys` / `values` : CAN'T be nil or empty. - // `sortKeys[i]` / `values[i]` : CAN'T be nil but CAN be empty. - MultiSet(ctx context.Context, hashKey []byte, sortKeys [][]byte, values [][]byte) error - MultiSetOpt(ctx context.Context, hashKey []byte, sortKeys [][]byte, values [][]byte, ttl time.Duration) error - - // MultiDel deletes the multiple entries under `hashKey` all atomically in one operation. - // `hashKey` / `sortKeys` : CAN'T be nil or empty. - // `sortKeys[i]` : CAN'T be nil but CAN be empty. 
- MultiDel(ctx context.Context, hashKey []byte, sortKeys [][]byte) error - - // Returns ttl(time-to-live) in seconds: -1 if ttl is not set; -2 if entry doesn't exist. - // `hashKey` : CAN'T be nil or empty. - // `sortKey` : CAN'T be nil but CAN be empty. - TTL(ctx context.Context, hashKey []byte, sortKey []byte) (int, error) - - // Check value existence for the entry for `hashKey` + `sortKey`. - // `hashKey`: CAN'T be nil or empty. - Exist(ctx context.Context, hashKey []byte, sortKey []byte) (bool, error) - - // Get Scanner for {startSortKey, stopSortKey} within hashKey. - // startSortKey: nil or len(startSortKey) == 0 means start from begin. - // stopSortKey: nil or len(stopSortKey) == 0 means stop to end. - // `hashKey`: CAN'T be nil or empty. - GetScanner(ctx context.Context, hashKey []byte, startSortKey []byte, stopSortKey []byte, options *ScannerOptions) (Scanner, error) - - // Get Scanners for all data in pegasus, the count of scanners will - // be no more than maxSplitCount - GetUnorderedScanners(ctx context.Context, maxSplitCount int, options *ScannerOptions) ([]Scanner, error) - - // Atomically check and set value by key from the cluster. The value will be set if and only if check passed. - // The sort key for checking and setting can be the same or different. - // - // `checkSortKey`: The sort key for checking. - // `setSortKey`: The sort key for setting. - // `checkOperand`: - CheckAndSet(ctx context.Context, hashKey []byte, checkSortKey []byte, checkType CheckType, - checkOperand []byte, setSortKey []byte, setValue []byte, options *CheckAndSetOptions) (*CheckAndSetResult, error) - - // Returns the count of sortkeys under hashkey. - // `hashKey`: CAN'T be nil or empty. - SortKeyCount(ctx context.Context, hashKey []byte) (int64, error) - - // Atomically increment value by key from the cluster. - // Returns the new value. - // `hashKey` / `sortKeys` : CAN'T be nil or empty - Incr(ctx context.Context, hashKey []byte, sortKey []byte, increment int64) (int64, error) - - // Gets values from a batch of CompositeKeys. Internally it distributes each key - // into a Get call and wait until all returned. - // - // `keys`: CAN'T be nil or empty, `hashkey` in `keys` can't be nil or empty either. - // The returned values are in sequence order of each key, aka `keys[i] => values[i]`. - // If keys[i] is not found, or the Get failed, values[i] is set nil. - // - // Returns a non-nil `err` once there's a failed Get call. It doesn't mean all calls failed. - // - // NOTE: this operation is not guaranteed to be atomic - BatchGet(ctx context.Context, keys []CompositeKey) (values [][]byte, err error) - - Close() error -} - -type pegasusTableConnector struct { - meta *session.MetaManager - replica *session.ReplicaManager - - logger pegalog.Logger - - tableName string - appID int32 - parts []*replicaNode - mu sync.RWMutex - - confUpdateCh chan bool - tom tomb.Tomb -} - -type replicaNode struct { - session *session.ReplicaSession - pconf *replication.PartitionConfiguration -} - -// ConnectTable queries for the configuration of the given table, and set up connection to -// the replicas which the table locates on. 
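The interface above also carries the atomic primitives. As one example, a hedged sketch of a check-and-set that only writes a sort key when it does not exist yet (an "insert if absent"), combining the `CheckType` constants and `CheckAndSetOptions` from the deleted `check_and_set.go` with the `CheckAndSet` method declared above; `table` is again assumed to be an open `TableConnector`.

```go
package example

import (
	"context"
	"fmt"

	"github.com/XiaoMi/pegasus-go-client/pegasus"
)

// insertIfAbsent writes value under hashKey/sortKey only when that sort key
// holds no value yet, using the CAS primitive from the deleted client.
func insertIfAbsent(ctx context.Context, table pegasus.TableConnector, hashKey, sortKey, value []byte) (bool, error) {
	res, err := table.CheckAndSet(ctx,
		hashKey,
		sortKey,                        // checkSortKey: the sort key whose existence is checked
		pegasus.CheckTypeValueNotExist, // pass the check only if no value exists
		nil,                            // existence checks need no operand
		sortKey,                        // setSortKey: write to the same sort key
		value,
		&pegasus.CheckAndSetOptions{ReturnCheckValue: true},
	)
	if err != nil {
		return false, err
	}
	if !res.SetSucceed {
		fmt.Printf("key already present, current value: %q\n", res.CheckValue)
	}
	return res.SetSucceed, nil
}
```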
-func ConnectTable(ctx context.Context, tableName string, meta *session.MetaManager, replica *session.ReplicaManager) (TableConnector, error) { - p := &pegasusTableConnector{ - tableName: tableName, - meta: meta, - replica: replica, - confUpdateCh: make(chan bool, 1), - logger: pegalog.GetLogger(), - } - - // if the session became unresponsive, TableConnector auto-triggers - // a update of the routing table. - p.replica.SetUnresponsiveHandler(func(n session.NodeSession) { - p.tryConfUpdate(errors.New("session unresponsive for long"), n) - }) - - if err := p.updateConf(ctx); err != nil { - return nil, err - } - - p.tom.Go(p.loopForAutoUpdate) - return p, nil -} - -// Update configuration of this table. -func (p *pegasusTableConnector) updateConf(ctx context.Context) error { - resp, err := p.meta.QueryConfig(ctx, p.tableName) - if err == nil { - err = p.handleQueryConfigResp(resp) - } - if err != nil { - return fmt.Errorf("failed to connect table(%s): %s", p.tableName, err) - } - return nil -} - -func (p *pegasusTableConnector) handleQueryConfigResp(resp *replication.QueryCfgResponse) error { - if resp.Err.Errno != base.ERR_OK.String() { - return errors.New(resp.Err.Errno) - } - if resp.PartitionCount == 0 || len(resp.Partitions) != int(resp.PartitionCount) { - return fmt.Errorf("invalid table configuration: response [%v]", resp) - } - - p.mu.Lock() - defer p.mu.Unlock() - - p.appID = resp.AppID - - if len(resp.Partitions) > len(p.parts) { - // during partition split or first configuration update of client. - for _, part := range p.parts { - part.session.Close() - } - p.parts = make([]*replicaNode, len(resp.Partitions)) - } - - // TODO(wutao1): make sure PartitionIndex are continuous - for _, pconf := range resp.Partitions { - if pconf == nil || pconf.Primary == nil || pconf.Primary.GetRawAddress() == 0 { - return fmt.Errorf("unable to resolve routing table [appid: %d]: [%v]", p.appID, pconf) - } - r := &replicaNode{ - pconf: pconf, - session: p.replica.GetReplica(pconf.Primary.GetAddress()), - } - p.parts[pconf.Pid.PartitionIndex] = r - } - return nil -} - -func validateHashKey(hashKey []byte) error { - if hashKey == nil { - return fmt.Errorf("InvalidParameter: hashkey must not be nil") - } - if len(hashKey) == 0 { - return fmt.Errorf("InvalidParameter: hashkey must not be empty") - } - if len(hashKey) > math.MaxUint16 { - return fmt.Errorf("InvalidParameter: length of hashkey (%d) must be less than %d", len(hashKey), math.MaxUint16) - } - return nil -} - -func validateCompositeKeys(keys []CompositeKey) error { - if keys == nil { - return fmt.Errorf("InvalidParameter: CompositeKeys must not be nil") - } - if len(keys) == 0 { - return fmt.Errorf("InvalidParameter: CompositeKeys must not be empty") - } - return nil -} - -// WrapError wraps up the internal errors for ensuring that all types of errors -// returned by public interfaces are pegasus.PError. 
-func WrapError(err error, op OpType) error { - if err != nil { - if pe, ok := err.(*PError); ok { - pe.Op = op - return pe - } - return &PError{ - Err: err, - Op: op, - } - } - return nil -} - -func (p *pegasusTableConnector) wrapPartitionError(err error, gpid *base.Gpid, replica *session.ReplicaSession, opType OpType) error { - err = WrapError(err, opType) - if err == nil { - return nil - } - perr := err.(*PError) - if perr.Err != nil { - perr.Err = fmt.Errorf("%s [%s, %s, table=%s]", perr.Err, gpid, replica, p.tableName) - } else { - perr.Err = fmt.Errorf("[%s, %s, table=%s]", gpid, replica, p.tableName) - } - return perr -} - -func (p *pegasusTableConnector) Get(ctx context.Context, hashKey []byte, sortKey []byte) ([]byte, error) { - res, err := p.runPartitionOp(ctx, hashKey, &op.Get{HashKey: hashKey, SortKey: sortKey}, OpGet) - if err != nil { - return nil, err - } - if res == nil { // indicates the record is not found - return nil, nil - } - return res.([]byte), err -} - -func (p *pegasusTableConnector) SetTTL(ctx context.Context, hashKey []byte, sortKey []byte, value []byte, ttl time.Duration) error { - req := &op.Set{HashKey: hashKey, SortKey: sortKey, Value: value, TTL: ttl} - _, err := p.runPartitionOp(ctx, hashKey, req, OpSet) - return err -} - -func (p *pegasusTableConnector) Set(ctx context.Context, hashKey []byte, sortKey []byte, value []byte) error { - return p.SetTTL(ctx, hashKey, sortKey, value, 0) -} - -func (p *pegasusTableConnector) Del(ctx context.Context, hashKey []byte, sortKey []byte) error { - req := &op.Del{HashKey: hashKey, SortKey: sortKey} - _, err := p.runPartitionOp(ctx, hashKey, req, OpDel) - return err -} - -func setRequestByOption(options *MultiGetOptions, request *rrdb.MultiGetRequest) { - request.MaxKvCount = int32(options.MaxFetchCount) - request.MaxKvSize = int32(options.MaxFetchSize) - request.StartInclusive = options.StartInclusive - request.StopInclusive = options.StopInclusive - request.SortKeyFilterType = rrdb.FilterType(options.SortKeyFilter.Type) - request.SortKeyFilterPattern = &base.Blob{Data: options.SortKeyFilter.Pattern} - request.Reverse = options.Reverse - request.NoValue = options.NoValue -} - -func (p *pegasusTableConnector) MultiGetOpt(ctx context.Context, hashKey []byte, sortKeys [][]byte, options *MultiGetOptions) ([]*KeyValue, bool, error) { - req := &op.MultiGet{HashKey: hashKey, SortKeys: sortKeys, Req: rrdb.NewMultiGetRequest()} - setRequestByOption(options, req.Req) - res, err := p.runPartitionOp(ctx, hashKey, req, OpMultiGet) - if err != nil { - return nil, false, err - } - return extractMultiGetResult(res.(*op.MultiGetResult)) -} - -func (p *pegasusTableConnector) MultiGet(ctx context.Context, hashKey []byte, sortKeys [][]byte) ([]*KeyValue, bool, error) { - return p.MultiGetOpt(ctx, hashKey, sortKeys, DefaultMultiGetOptions) -} - -func (p *pegasusTableConnector) MultiGetRangeOpt(ctx context.Context, hashKey []byte, startSortKey []byte, stopSortKey []byte, options *MultiGetOptions) ([]*KeyValue, bool, error) { - req := &op.MultiGet{HashKey: hashKey, StartSortkey: startSortKey, StopSortkey: stopSortKey, Req: rrdb.NewMultiGetRequest()} - setRequestByOption(options, req.Req) - res, err := p.runPartitionOp(ctx, hashKey, req, OpMultiGetRange) - if err != nil { - return nil, false, err - } - return extractMultiGetResult(res.(*op.MultiGetResult)) -} - -func extractMultiGetResult(res *op.MultiGetResult) ([]*KeyValue, bool, error) { - if len(res.KVs) == 0 { - return nil, res.AllFetched, nil - } - kvs := make([]*KeyValue, len(res.KVs)) 
- for i, blobKv := range res.KVs { - kvs[i] = &KeyValue{ - SortKey: blobKv.Key.Data, - Value: blobKv.Value.Data, - } - } - return kvs, res.AllFetched, nil -} - -func (p *pegasusTableConnector) MultiGetRange(ctx context.Context, hashKey []byte, startSortKey []byte, stopSortKey []byte) ([]*KeyValue, bool, error) { - return p.MultiGetRangeOpt(ctx, hashKey, startSortKey, stopSortKey, DefaultMultiGetOptions) -} - -func (p *pegasusTableConnector) MultiSet(ctx context.Context, hashKey []byte, sortKeys [][]byte, values [][]byte) error { - return p.MultiSetOpt(ctx, hashKey, sortKeys, values, 0) -} - -func (p *pegasusTableConnector) MultiSetOpt(ctx context.Context, hashKey []byte, sortKeys [][]byte, values [][]byte, ttl time.Duration) error { - req := &op.MultiSet{HashKey: hashKey, SortKeys: sortKeys, Values: values, TTL: ttl} - _, err := p.runPartitionOp(ctx, hashKey, req, OpMultiSet) - return err -} - -func (p *pegasusTableConnector) MultiDel(ctx context.Context, hashKey []byte, sortKeys [][]byte) error { - _, err := p.runPartitionOp(ctx, hashKey, &op.MultiDel{HashKey: hashKey, SortKeys: sortKeys}, OpMultiDel) - return err -} - -// -2 means entry not found. -func (p *pegasusTableConnector) TTL(ctx context.Context, hashKey []byte, sortKey []byte) (int, error) { - res, err := p.runPartitionOp(ctx, hashKey, &op.TTL{HashKey: hashKey, SortKey: sortKey}, OpTTL) - return res.(int), err -} - -func (p *pegasusTableConnector) Exist(ctx context.Context, hashKey []byte, sortKey []byte) (bool, error) { - ttl, err := p.TTL(ctx, hashKey, sortKey) - if err == nil { - if ttl == -2 { - return false, nil - } - return true, nil - } - return false, WrapError(err, OpExist) -} - -func (p *pegasusTableConnector) GetScanner(ctx context.Context, hashKey []byte, startSortKey []byte, stopSortKey []byte, - options *ScannerOptions) (Scanner, error) { - scanner, err := func() (Scanner, error) { - if err := validateHashKey(hashKey); err != nil { - return nil, err - } - - start := encodeHashKeySortKey(hashKey, startSortKey) - var stop *base.Blob - if len(stopSortKey) == 0 { - stop = encodeHashKeySortKey(hashKey, []byte{0xFF, 0xFF}) // []byte{0xFF, 0xFF} means the max sortKey value - options.StopInclusive = false - } else { - stop = encodeHashKeySortKey(hashKey, stopSortKey) - } - - if options.SortKeyFilter.Type == FilterTypeMatchPrefix { - prefixStartBlob := encodeHashKeySortKey(hashKey, options.SortKeyFilter.Pattern) - - // if the prefixStartKey generated by pattern is greater than the startKey, start from the prefixStartKey - if bytes.Compare(prefixStartBlob.Data, start.Data) > 0 { - start = prefixStartBlob - options.StartInclusive = true - } - - prefixStop := encodeNextBytesByKeys(hashKey, options.SortKeyFilter.Pattern) - - // if the prefixStopKey generated by pattern is less than the stopKey, end to the prefixStopKey - if bytes.Compare(prefixStop.Data, stop.Data) <= 0 { - stop = prefixStop - options.StopInclusive = false - } - } - - cmp := bytes.Compare(start.Data, stop.Data) - if cmp < 0 || (cmp == 0 && options.StartInclusive && options.StopInclusive) { - gpid, err := p.getGpid(start.Data) - if err != nil && gpid != nil { - return nil, err - } - return newPegasusScanner(p, gpid, options, start, stop), nil - } - return nil, fmt.Errorf("the scanning interval MUST NOT BE EMPTY") - }() - return scanner, WrapError(err, OpGetScanner) -} - -func (p *pegasusTableConnector) GetUnorderedScanners(ctx context.Context, maxSplitCount int, - options *ScannerOptions) ([]Scanner, error) { - scanners, err := func() ([]Scanner, error) { - if 
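`GetScanner` above narrows the `[start, stop)` range when a match-prefix sort-key filter is set: it raises the start key up to the prefix and lowers the stop key to the smallest key that sorts after every key carrying that prefix (via `encodeNextBytesByKeys`). A standalone sketch of that bound tightening on plain sort keys (the real code works on the full encoded hashKey+sortKey blobs; the `nextPrefix` name is mine):

```go
package main

import (
	"bytes"
	"fmt"
)

// nextPrefix returns the smallest byte string sorting strictly after every
// string that has `prefix` as a prefix: bump the last non-0xFF byte and
// truncate, like encodeNextBytesByKeys does on the encoded key.
func nextPrefix(prefix []byte) []byte {
	out := append([]byte(nil), prefix...)
	for i := len(out) - 1; i >= 0; i-- {
		if out[i] != 0xFF {
			out[i]++
			return out[:i+1]
		}
	}
	return nil // all 0xFF: no finite upper bound
}

func main() {
	start, stop := []byte("a"), []byte("zzzz")
	prefix := []byte("user:")

	// Tighten the scan range the way the deleted GetScanner does.
	if bytes.Compare(prefix, start) > 0 {
		start = prefix
	}
	if up := nextPrefix(prefix); up != nil && bytes.Compare(up, stop) <= 0 {
		stop = up
	}
	fmt.Printf("scan [%q, %q)\n", start, stop) // scan ["user:", "user;")
}
```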
maxSplitCount <= 0 { - return nil, fmt.Errorf("invalid maxSplitCount: %d", maxSplitCount) - } - allGpid := p.getAllGpid() - total := len(allGpid) - - var split int // the actual split count - if total < maxSplitCount { - split = total - } else { - split = maxSplitCount - } - scanners := make([]Scanner, split) - - // k: the smallest multiple of split which is greater than or equal to total - k := 1 - for ; k*split < total; k++ { - } - left := total - k*(split-1) - - sliceLen := 0 - id := 0 - for i := 0; i < split; i++ { - if i == 0 { - sliceLen = left - } else { - sliceLen = k - } - gpidSlice := make([]*base.Gpid, sliceLen) - for j := 0; j < sliceLen; j++ { - gpidSlice[j] = allGpid[id] - id++ - } - scanners[i] = newPegasusScannerForUnorderedScanners(p, gpidSlice, options) - } - return scanners, nil - }() - return scanners, WrapError(err, OpGetUnorderedScanners) -} - -func (p *pegasusTableConnector) CheckAndSet(ctx context.Context, hashKey []byte, checkSortKey []byte, checkType CheckType, - checkOperand []byte, setSortKey []byte, setValue []byte, options *CheckAndSetOptions) (*CheckAndSetResult, error) { - - if options == nil { - options = &CheckAndSetOptions{} - } - request := rrdb.NewCheckAndSetRequest() - request.CheckType = rrdb.CasCheckType(checkType) - request.CheckOperand = &base.Blob{Data: checkOperand} - request.CheckSortKey = &base.Blob{Data: checkSortKey} - request.HashKey = &base.Blob{Data: hashKey} - request.SetExpireTsSeconds = 0 - if options.SetValueTTLSeconds != 0 { - request.SetExpireTsSeconds = expireTsSeconds(time.Second * time.Duration(options.SetValueTTLSeconds)) - } - request.SetSortKey = &base.Blob{Data: setSortKey} - request.SetValue = &base.Blob{Data: setValue} - request.ReturnCheckValue = options.ReturnCheckValue - if !bytes.Equal(checkSortKey, setSortKey) { - request.SetDiffSortKey = true - } else { - request.SetDiffSortKey = false - } - - req := &op.CheckAndSet{Req: request} - res, err := p.runPartitionOp(ctx, hashKey, req, OpCheckAndSet) - if err != nil { - return nil, err - } - casRes := res.(*op.CheckAndSetResult) - return &CheckAndSetResult{ - SetSucceed: casRes.SetSucceed, - CheckValue: casRes.CheckValue, - CheckValueExist: casRes.CheckValueExist, - CheckValueReturned: casRes.CheckValueReturned, - }, nil -} - -func (p *pegasusTableConnector) SortKeyCount(ctx context.Context, hashKey []byte) (int64, error) { - res, err := p.runPartitionOp(ctx, hashKey, &op.SortKeyCount{HashKey: hashKey}, OpSortKeyCount) - if err != nil { - return 0, err - } - return res.(int64), nil -} - -func (p *pegasusTableConnector) Incr(ctx context.Context, hashKey []byte, sortKey []byte, increment int64) (int64, error) { - req := &op.Incr{HashKey: hashKey, SortKey: sortKey, Increment: increment} - res, err := p.runPartitionOp(ctx, hashKey, req, OpIncr) - if err != nil { - return 0, err - } - return res.(int64), nil -} - -func (p *pegasusTableConnector) runPartitionOp(ctx context.Context, hashKey []byte, req op.Request, optype OpType) (interface{}, error) { - // validate arguments - if err := req.Validate(); err != nil { - return 0, WrapError(err, optype) - } - gpid, part := p.getPartition(hashKey) - res, err := retryFailOver(ctx, func() (confUpdated bool, result interface{}, err error) { - result, err = req.Run(ctx, gpid, part) - confUpdated, err = p.handleReplicaError(err, part) - return - }) - return res, p.wrapPartitionError(err, gpid, part, optype) -} - -func (p *pegasusTableConnector) BatchGet(ctx context.Context, keys []CompositeKey) (values [][]byte, err error) { - v, err := 
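`GetUnorderedScanners` above spreads `total` partitions over at most `maxSplitCount` scanners: `k` is the smallest integer with `k*split >= total`, the first scanner takes the remainder, and the rest take `k` partitions each. A small sketch of just that arithmetic (function name is mine):

```go
package main

import "fmt"

// splitSizes mirrors the partition-splitting arithmetic in GetUnorderedScanners.
func splitSizes(total, maxSplit int) []int {
	split := maxSplit
	if total < maxSplit {
		split = total
	}
	k := 1
	for ; k*split < total; k++ {
	}
	left := total - k*(split-1)

	sizes := make([]int, split)
	for i := range sizes {
		if i == 0 {
			sizes[i] = left // the first slice absorbs the remainder
		} else {
			sizes[i] = k
		}
	}
	return sizes
}

func main() {
	fmt.Println(splitSizes(8, 3)) // [2 3 3]
	fmt.Println(splitSizes(2, 4)) // [1 1]
}
```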
func() ([][]byte, error) { - if err := validateCompositeKeys(keys); err != nil { - return nil, err - } - - values = make([][]byte, len(keys)) - funcs := make([]func() error, 0, len(keys)) - for i := 0; i < len(keys); i++ { - idx := i - funcs = append(funcs, func() (subErr error) { - key := keys[idx] - values[idx], subErr = p.Get(ctx, key.HashKey, key.SortKey) - if subErr != nil { - values[idx] = nil - return subErr - } - return nil - }) - } - return values, kerrors.AggregateGoroutines(funcs...) - }() - return v, WrapError(err, OpBatchGet) -} - -func getPartitionIndex(hashKey []byte, partitionCount int) int32 { - return int32(crc64Hash(hashKey) % uint64(partitionCount)) -} - -func (p *pegasusTableConnector) getPartition(hashKey []byte) (*base.Gpid, *session.ReplicaSession) { - p.mu.RLock() - defer p.mu.RUnlock() - - gpid := &base.Gpid{ - Appid: p.appID, - PartitionIndex: getPartitionIndex(hashKey, len(p.parts)), - } - part := p.parts[gpid.PartitionIndex].session - - return gpid, part -} - -func (p *pegasusTableConnector) getPartitionByGpid(gpid *base.Gpid) *session.ReplicaSession { - p.mu.RLock() - defer p.mu.RUnlock() - return p.parts[gpid.PartitionIndex].session -} - -func (p *pegasusTableConnector) Close() error { - p.tom.Kill(errors.New("table closed")) - return nil -} - -func (p *pegasusTableConnector) handleReplicaError(err error, replica *session.ReplicaSession) (bool, error) { - if err != nil { - confUpdate := false - - switch err { - case base.ERR_OK: - // should not happen - return false, nil - - case base.ERR_TIMEOUT: - case context.DeadlineExceeded: - case context.Canceled: - // timeout will not trigger a configuration update - - case base.ERR_NOT_ENOUGH_MEMBER: - case base.ERR_CAPACITY_EXCEEDED: - - case base.ERR_BUSY: - // throttled by server, skip confUpdate - - default: - confUpdate = true - } - - switch err { - case base.ERR_BUSY: - err = errors.New(err.Error() + " Rate of requests exceeds the throughput limit") - case base.ERR_INVALID_STATE: - err = errors.New(err.Error() + " The target replica is not primary") - case base.ERR_OBJECT_NOT_FOUND: - err = errors.New(err.Error() + " The replica server doesn't serve this partition") - } - - if confUpdate { - // we need to check if there's newer configuration. - p.tryConfUpdate(err, replica) - } - - return confUpdate, err - } - return false, nil -} - -// tryConfUpdate makes an attempt to update table configuration by querying meta server. 
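`BatchGet` above issues one `Get` per composite key concurrently and aggregates the per-key errors (through k8s.io/apimachinery's `AggregateGoroutines` in the deleted code). A dependency-free sketch of the same fan-out; `fetch` is a placeholder for the per-key lookup, and for brevity only the first error is surfaced rather than a full aggregate:

```go
package main

import (
	"errors"
	"fmt"
	"sync"
)

// fetch stands in for a per-key Get; it is a placeholder, not the library API.
func fetch(key string) (string, error) {
	if key == "" {
		return "", errors.New("empty key")
	}
	return "value-of-" + key, nil
}

// batchGet runs one fetch per key concurrently; each goroutine writes only
// its own index, so no extra locking is needed.
func batchGet(keys []string) ([]string, error) {
	values := make([]string, len(keys))
	errs := make([]error, len(keys))

	var wg sync.WaitGroup
	for i, k := range keys {
		wg.Add(1)
		go func(i int, k string) {
			defer wg.Done()
			values[i], errs[i] = fetch(k)
		}(i, k)
	}
	wg.Wait()

	for _, e := range errs {
		if e != nil {
			return values, e // the real code aggregates all errors instead
		}
	}
	return values, nil
}

func main() {
	vals, err := batchGet([]string{"a", "", "c"})
	fmt.Println(vals, err)
}
```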
-func (p *pegasusTableConnector) tryConfUpdate(err error, replica session.NodeSession) {
-	select {
-	case p.confUpdateCh <- true:
-		p.logger.Printf("trigger configuration update of table [%s] due to RPC failure [%s] to %s", p.tableName, err, replica)
-	default:
-	}
-}
-
-func (p *pegasusTableConnector) loopForAutoUpdate() error {
-	for {
-		select {
-		case <-p.confUpdateCh:
-			p.selfUpdate()
-		case <-p.tom.Dying():
-			return nil
-		}
-
-		// sleep a while
-		select {
-		case <-time.After(time.Second):
-		case <-p.tom.Dying():
-			return nil
-		}
-	}
-}
-
-func (p *pegasusTableConnector) selfUpdate() bool {
-	// ignore the returned error
-	ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
-	defer cancel()
-
-	if err := p.updateConf(ctx); err != nil {
-		p.logger.Printf("self update failed [table: %s]: %s", p.tableName, err.Error())
-	}
-
-	// flush confUpdateCh
-	select {
-	case <-p.confUpdateCh:
-	default:
-	}
-
-	return true
-}
-
-func (p *pegasusTableConnector) getGpid(key []byte) (*base.Gpid, error) {
-	if key == nil || len(key) < 2 {
-		return nil, fmt.Errorf("unable to getGpid by key: %s", key)
-	}
-
-	hashKeyLen := 0xFFFF & binary.BigEndian.Uint16(key[:2])
-	if hashKeyLen != 0xFFFF && int(2+hashKeyLen) <= len(key) {
-		gpid := &base.Gpid{Appid: p.appID}
-		if hashKeyLen == 0 {
-			gpid.PartitionIndex = int32(crc64Hash(key[2:]) % uint64(len(p.parts)))
-		} else {
-			gpid.PartitionIndex = int32(crc64Hash(key[2:hashKeyLen+2]) % uint64(len(p.parts)))
-		}
-		return gpid, nil
-
-	}
-	return nil, fmt.Errorf("unable to getGpid, hashKey length invalid")
-}
-
-func (p *pegasusTableConnector) getAllGpid() []*base.Gpid {
-	p.mu.RLock()
-	defer p.mu.RUnlock()
-	count := len(p.parts)
-	ret := make([]*base.Gpid, count)
-	for i := 0; i < count; i++ {
-		ret[i] = &base.Gpid{Appid: p.appID, PartitionIndex: int32(i)}
-	}
-	return ret
-}
diff --git a/vendor/github.com/XiaoMi/pegasus-go-client/pegasus/util.go b/vendor/github.com/XiaoMi/pegasus-go-client/pegasus/util.go
deleted file mode 100644
index bf99a15..0000000
--- a/vendor/github.com/XiaoMi/pegasus-go-client/pegasus/util.go
+++ /dev/null
@@ -1,62 +0,0 @@
-// Copyright (c) 2017, Xiaomi, Inc. All rights reserved.
-// This source code is licensed under the Apache License Version 2.0, which
-// can be found in the LICENSE file in the root directory of this source tree.
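`tryConfUpdate` and `loopForAutoUpdate` above coalesce many failure reports into at most one configuration refresh per second: the trigger is sent with a non-blocking select so duplicates are dropped, and the loop sleeps after each refresh. A compact sketch of that pattern, assuming a one-slot buffered trigger channel (the `updater` type is illustrative):

```go
package main

import (
	"fmt"
	"time"
)

type updater struct {
	trigger chan struct{}
	done    chan struct{}
}

func newUpdater() *updater {
	return &updater{trigger: make(chan struct{}, 1), done: make(chan struct{})}
}

// requestUpdate drops the trigger if one is already pending, like the
// non-blocking send in tryConfUpdate.
func (u *updater) requestUpdate() {
	select {
	case u.trigger <- struct{}{}:
	default:
	}
}

// loop coalesces triggers and refreshes at most once per second,
// mirroring loopForAutoUpdate.
func (u *updater) loop(refresh func()) {
	for {
		select {
		case <-u.trigger:
			refresh()
		case <-u.done:
			return
		}
		select {
		case <-time.After(time.Second):
		case <-u.done:
			return
		}
	}
}

func main() {
	u := newUpdater()
	go u.loop(func() { fmt.Println("refreshing table config") })
	for i := 0; i < 5; i++ {
		u.requestUpdate() // a burst of failures collapses into one refresh
	}
	time.Sleep(100 * time.Millisecond)
	close(u.done)
}
```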
- -package pegasus - -import ( - "encoding/binary" - "hash/crc64" - "time" - - "github.com/XiaoMi/pegasus-go-client/idl/base" -) - -func encodeHashKeySortKey(hashKey []byte, sortKey []byte) *base.Blob { - hashKeyLen := len(hashKey) - sortKeyLen := len(sortKey) - - blob := &base.Blob{ - Data: make([]byte, 2+hashKeyLen+sortKeyLen), - } - - binary.BigEndian.PutUint16(blob.Data, uint16(hashKeyLen)) - - if hashKeyLen > 0 { - copy(blob.Data[2:], hashKey) - } - - if sortKeyLen > 0 { - copy(blob.Data[2+hashKeyLen:], sortKey) - } - - return blob -} - -func encodeNextBytesByKeys(hashKey []byte, sortKey []byte) *base.Blob { - key := encodeHashKeySortKey(hashKey, sortKey) - array := key.Data - - i := len(array) - 1 - for ; i >= 2; i-- { - if array[i] != 0xFF { - array[i]++ - break - } - } - return &base.Blob{Data: array[:i+1]} -} - -var crc64Table = crc64.MakeTable(0x9a6c9329ac4bc9b5) - -func crc64Hash(data []byte) uint64 { - return crc64.Checksum(data, crc64Table) -} - -func expireTsSeconds(ttl time.Duration) int32 { - if ttl == 0 { - return 0 - } - // 1451606400 means seconds since 2016.01.01-00:00:00 GMT - return int32(ttl.Seconds()) + int32(time.Now().Unix()-1451606400) -} diff --git a/vendor/github.com/XiaoMi/pegasus-go-client/rpc/codec.go b/vendor/github.com/XiaoMi/pegasus-go-client/rpc/codec.go deleted file mode 100644 index 034358e..0000000 --- a/vendor/github.com/XiaoMi/pegasus-go-client/rpc/codec.go +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Copyright (c) 2017, Xiaomi, Inc. - * Copyright 2014 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package rpc - -// Codec defines the interface that this client uses to encode and decode messages. -// Note that implementations of this interface must be thread safe; -// a Codec's methods can be called from concurrent goroutines. -type Codec interface { - // Marshal returns the wire format of v. - Marshal(v interface{}) ([]byte, error) - // Unmarshal parses the wire format into v. - Unmarshal(data []byte, v interface{}) error - // String returns the name of the Codec implementation. - String() string -} diff --git a/vendor/github.com/XiaoMi/pegasus-go-client/rpc/rpc_conn.go b/vendor/github.com/XiaoMi/pegasus-go-client/rpc/rpc_conn.go deleted file mode 100644 index c504e66..0000000 --- a/vendor/github.com/XiaoMi/pegasus-go-client/rpc/rpc_conn.go +++ /dev/null @@ -1,218 +0,0 @@ -// Copyright (c) 2017, Xiaomi, Inc. All rights reserved. -// This source code is licensed under the Apache License Version 2.0, which -// can be found in the LICENSE file in the root directory of this source tree. - -package rpc - -import ( - "errors" - "io" - "net" - "sync" - "time" - - "github.com/XiaoMi/pegasus-go-client/pegalog" -) - -// TODO(wutao1): make these parameters configurable -const ( - ConnDialTimeout = time.Second * 3 - ConnReadTimeout = 30 * time.Second - ConnWriteTimeout = 10 * time.Second -) - -type ConnState int - -const ( - // The state that a connection starts from. 
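The deleted `util.go` above is where the on-wire key layout lives: a 2-byte big-endian hash-key length, then the hash key, then the sort key; the partition is chosen by CRC-64 (polynomial 0x9a6c9329ac4bc9b5) of the hash key modulo the partition count. A standalone sketch of both pieces, using only the standard library (function names are mine):

```go
package main

import (
	"encoding/binary"
	"fmt"
	"hash/crc64"
)

// Same polynomial as the deleted util.go.
var crcTable = crc64.MakeTable(0x9a6c9329ac4bc9b5)

// encodeKey lays out [uint16 BE hash-key length][hashKey][sortKey],
// mirroring encodeHashKeySortKey.
func encodeKey(hashKey, sortKey []byte) []byte {
	buf := make([]byte, 2+len(hashKey)+len(sortKey))
	binary.BigEndian.PutUint16(buf, uint16(len(hashKey)))
	copy(buf[2:], hashKey)
	copy(buf[2+len(hashKey):], sortKey)
	return buf
}

// partitionIndex picks the partition the same way getPartitionIndex does:
// crc64(hashKey) mod partitionCount.
func partitionIndex(hashKey []byte, partitionCount int) int {
	return int(crc64.Checksum(hashKey, crcTable) % uint64(partitionCount))
}

func main() {
	key := encodeKey([]byte("user1"), []byte("2022-10"))
	fmt.Printf("% x\n", key)
	fmt.Println("partition:", partitionIndex([]byte("user1"), 8))
}
```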
- ConnStateInit ConnState = iota - - ConnStateConnecting - - ConnStateReady - - // The state that indicates some error occurred in the previous operations. - ConnStateTransientFailure - - // The state that RpcConn will turn into after Close() is called. - ConnStateClosed -) - -func (s ConnState) String() string { - switch s { - case ConnStateInit: - return "ConnStateInit" - case ConnStateConnecting: - return "ConnStateConnecting" - case ConnStateReady: - return "ConnStateReady" - case ConnStateTransientFailure: - return "ConnStateTransientFailure" - case ConnStateClosed: - return "ConnStateClosed" - default: - panic("no such state") - } -} - -var ErrConnectionNotReady = errors.New("connection is not ready") - -// RpcConn maintains a network connection to a particular endpoint. -type RpcConn struct { - Endpoint string - - wstream *WriteStream - rstream *ReadStream - conn net.Conn - - writeTimeout time.Duration - readTimeout time.Duration - - cstate ConnState - mu sync.RWMutex - - logger pegalog.Logger -} - -// thread-safe -func (rc *RpcConn) GetState() ConnState { - rc.mu.RLock() - defer rc.mu.RUnlock() - return rc.cstate -} - -// thread-safe -func (rc *RpcConn) setState(state ConnState) { - rc.mu.Lock() - defer rc.mu.Unlock() - rc.cstate = state -} - -// This function is thread-safe. -func (rc *RpcConn) TryConnect() (err error) { - err = func() error { - // set state to ConnStateConnecting to - // make sure there's only 1 goroutine dialing simultaneously. - rc.mu.Lock() - defer rc.mu.Unlock() - if rc.cstate != ConnStateReady && rc.cstate != ConnStateConnecting { - rc.cstate = ConnStateConnecting - rc.mu.Unlock() - - // unlock for blocking call - d := &net.Dialer{ - Timeout: ConnDialTimeout, - } - conn, err := d.Dial("tcp", rc.Endpoint) - - rc.mu.Lock() - rc.conn = conn - if err != nil { - return err - } - tcpConn, _ := rc.conn.(*net.TCPConn) - tcpConn.SetNoDelay(true) - rc.setReady(rc.conn, rc.conn) - } - return err - }() - - if err != nil { - rc.setState(ConnStateTransientFailure) - } - return err -} - -// This function is thread-safe. -func (rc *RpcConn) Close() (err error) { - rc.mu.Lock() - defer rc.mu.Unlock() - - rc.cstate = ConnStateClosed - if rc.conn != nil { - err = rc.conn.Close() - } - - return -} - -func (rc *RpcConn) Write(msgBytes []byte) (err error) { - err = func() error { - if rc.GetState() != ConnStateReady { - return ErrConnectionNotReady - } - - tcpConn, ok := rc.conn.(*net.TCPConn) - if ok { - tcpConn.SetWriteDeadline(time.Now().Add(rc.writeTimeout)) - } - - return rc.wstream.Write(msgBytes) - }() - - if err != nil { - rc.setState(ConnStateTransientFailure) - } - return err -} - -// Read is not intended to be cancellable using context by outside user. -// The only approach to cancel the operation is to close the connection. -// If the current socket is not well established for reading, the operation will -// fail and return error immediately. -// This function is not-thread-safe, because the underlying TCP IO buffer -// is not-thread-safe. Package users should call Read in a single goroutine. 
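`RpcConn.TryConnect` above serializes dialing through the state machine: only the caller that finds the connection in Init/TransientFailure moves it to Connecting and dials; concurrent callers observe Connecting or Ready and return, and a failed dial parks the connection in TransientFailure. A compact sketch of that guard (types and the target address are placeholders):

```go
package main

import (
	"fmt"
	"net"
	"sync"
	"time"
)

type connState int

const (
	stateInit connState = iota
	stateConnecting
	stateReady
	stateFailed
)

type conn struct {
	mu    sync.Mutex
	state connState
	c     net.Conn
	addr  string
}

// tryConnect dials at most once at a time; callers that lose the race simply
// see the Connecting/Ready state, mirroring RpcConn.TryConnect.
func (rc *conn) tryConnect() error {
	rc.mu.Lock()
	if rc.state == stateReady || rc.state == stateConnecting {
		rc.mu.Unlock()
		return nil
	}
	rc.state = stateConnecting
	rc.mu.Unlock() // unlock for the blocking dial

	c, err := net.DialTimeout("tcp", rc.addr, 3*time.Second)

	rc.mu.Lock()
	defer rc.mu.Unlock()
	if err != nil {
		rc.state = stateFailed
		return err
	}
	rc.c, rc.state = c, stateReady
	return nil
}

func main() {
	rc := &conn{addr: "127.0.0.1:34601"} // placeholder address
	fmt.Println(rc.tryConnect())
}
```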
-func (rc *RpcConn) Read(size int) (bytes []byte, err error) { - bytes, err = func() ([]byte, error) { - if rc.GetState() != ConnStateReady { - return nil, ErrConnectionNotReady - } - - tcpConn, ok := rc.conn.(*net.TCPConn) - if ok { - tcpConn.SetReadDeadline(time.Now().Add(rc.readTimeout)) - } - - bytes, err = rc.rstream.Next(size) - return bytes, err - }() - - if err != nil && !IsNetworkTimeoutErr(err) { - rc.setState(ConnStateTransientFailure) - } - return bytes, err -} - -// Returns an idle connection. -func NewRpcConn(addr string) *RpcConn { - return &RpcConn{ - Endpoint: addr, - logger: pegalog.GetLogger(), - cstate: ConnStateInit, - readTimeout: ConnReadTimeout, - writeTimeout: ConnWriteTimeout, - } -} - -// Not thread-safe -func (rc *RpcConn) SetWriteTimeout(timeout time.Duration) { - rc.writeTimeout = timeout -} - -// Not thread-safe -func (rc *RpcConn) SetReadTimeout(timeout time.Duration) { - rc.readTimeout = timeout -} - -func (rc *RpcConn) setReady(reader io.Reader, writer io.Writer) { - rc.cstate = ConnStateReady - rc.rstream = NewReadStream(reader) - rc.wstream = NewWriteStream(writer) -} - -// Create a fake client with specified reader and writer. -func NewFakeRpcConn(reader io.Reader, writer io.Writer) *RpcConn { - conn := NewRpcConn("") - conn.setReady(reader, writer) - return conn -} diff --git a/vendor/github.com/XiaoMi/pegasus-go-client/rpc/stream_in.go b/vendor/github.com/XiaoMi/pegasus-go-client/rpc/stream_in.go deleted file mode 100644 index d0f3f77..0000000 --- a/vendor/github.com/XiaoMi/pegasus-go-client/rpc/stream_in.go +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright (c) 2017, Xiaomi, Inc. All rights reserved. -// This source code is licensed under the Apache License Version 2.0, which -// can be found in the LICENSE file in the root directory of this source tree. - -package rpc - -import ( - "bufio" - "io" -) - -const ( -// In our experiment(go-ycsb, 100w insertions, 100 goroutines, 100 bytes record size), -// rpc performance can significantly be improved by increasing read buffer. -// As we continue to double the buffer size from 256KB to 512KB, the throughput -// as well as average latency stop gaining improvement. -// See Issue#4 for more detail. 
-//
-// read buffer 64kb
-// INSERT - Count: 192010, Avg(us): 3482, Min(us): 386, Max(us): 42951, 95th(us): 8000, 99th(us): 14000
-// INSERT - Count: 387387, Avg(us): 3447, Min(us): 356, Max(us): 45644, 95th(us): 8000, 99th(us): 14000
-// INSERT - Count: 584503, Avg(us): 3412, Min(us): 356, Max(us): 45644, 95th(us): 7000, 99th(us): 13000
-// INSERT - Count: 774928, Avg(us): 3438, Min(us): 356, Max(us): 45644, 95th(us): 7000, 99th(us): 13000
-// INSERT - Count: 965434, Avg(us): 3451, Min(us): 338, Max(us): 77322, 95th(us): 7000, 99th(us): 13000
-// INSERT - Count: 1000000, Avg(us): 3443, Min(us): 338, Max(us): 77322, 95th(us): 7000, 99th(us): 13000
-// Run finished, takes 51.837521852s
-//
-// read buffer 128kb
-// INSERT - Count: 225254, Avg(us): 3139, Min(us): 357, Max(us): 36666, 95th(us): 7000, 99th(us): 14000
-// INSERT - Count: 458059, Avg(us): 3110, Min(us): 357, Max(us): 42223, 95th(us): 7000, 99th(us): 14000
-// INSERT - Count: 683384, Avg(us): 3135, Min(us): 340, Max(us): 42223, 95th(us): 7000, 99th(us): 14000
-// INSERT - Count: 915600, Avg(us): 3157, Min(us): 322, Max(us): 57728, 95th(us): 7000, 99th(us): 15000
-// INSERT - Count: 999999, Avg(us): 3140, Min(us): 322, Max(us): 57728, 95th(us): 7000, 99th(us): 15000
-// Run finished, takes 43.703584059s
-//
-// read buffer 256kb
-// INSERT - Count: 366927, Avg(us): 2511, Min(us): 347, Max(us): 50030, 95th(us): 7000, 99th(us): 15000
-// INSERT - Count: 701266, Avg(us): 2649, Min(us): 344, Max(us): 73976, 95th(us): 8000, 99th(us): 17000
-// INSERT - Count: 1000000, Avg(us): 2615, Min(us): 340, Max(us): 73976, 95th(us): 8000, 99th(us): 17000
-// Run finished, takes 28.381599693s
-//
-// read buffer 512kb
-// INSERT - Count: 366486, Avg(us): 2596, Min(us): 332, Max(us): 83957, 95th(us): 8000, 99th(us): 17000
-// INSERT - Count: 725917, Avg(us): 2624, Min(us): 320, Max(us): 83957, 95th(us): 8000, 99th(us): 18000
-// INSERT - Count: 999999, Avg(us): 2634, Min(us): 320, Max(us): 95898, 95th(us): 8000, 99th(us): 18000
-// Run finished, takes 27.91239882s
-
-// readStreamBufferSize = 1024 * 256
-)
-
-// low-level rpc reader.
-type ReadStream struct {
-	bufReader *bufio.Reader
-}
-
-func (r *ReadStream) Next(toRead int) ([]byte, error) {
-	buf := make([]byte, toRead)
-	var total = 0
-
-	readSz, err := r.bufReader.Read(buf)
-	total += readSz
-	for total < toRead && err == nil {
-		readSz, err = r.bufReader.Read(buf[total:])
-		total += readSz
-	}
-
-	if err != nil {
-		return nil, err
-	}
-	return buf, nil
-}
-
-func NewReadStream(reader io.Reader) *ReadStream {
-	// By default readStreamBufferSize is not used in order to save memory usage,
-	// since for pegasus2, user may create a large number of replicaSession
-	// (100 TableConnectors eg.).
-	// TODO(wutao1): provide function to create read stream with readStreamBufferSize
-	return &ReadStream{
-		bufReader: bufio.NewReader(reader),
-	}
-}
diff --git a/vendor/github.com/XiaoMi/pegasus-go-client/rpc/stream_out.go b/vendor/github.com/XiaoMi/pegasus-go-client/rpc/stream_out.go
deleted file mode 100644
index 3cfe31b..0000000
--- a/vendor/github.com/XiaoMi/pegasus-go-client/rpc/stream_out.go
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright (c) 2017, Xiaomi, Inc. All rights reserved.
-// This source code is licensed under the Apache License Version 2.0, which
-// can be found in the LICENSE file in the root directory of this source tree.
-
-package rpc
-
-import (
-	"io"
-)
-
-// low-level rpc writer.
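`ReadStream.Next` above loops on `bufio.Reader.Read` until it has accumulated the requested number of bytes; the standard library offers essentially the same behaviour through `io.ReadFull`. A sketch of an equivalent reader (helper name is mine):

```go
package main

import (
	"bufio"
	"bytes"
	"fmt"
	"io"
)

// readExact returns exactly n bytes from r or an error, which is what
// ReadStream.Next implements by hand with a read loop.
func readExact(r *bufio.Reader, n int) ([]byte, error) {
	buf := make([]byte, n)
	if _, err := io.ReadFull(r, buf); err != nil {
		return nil, err
	}
	return buf, nil
}

func main() {
	r := bufio.NewReader(bytes.NewReader([]byte("pegasus-frame")))
	b, err := readExact(r, 7)
	fmt.Printf("%q %v\n", b, err) // "pegasus" <nil>
}
```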
-type WriteStream struct { - writer io.Writer -} - -// NewWriteStream always receives a *net.TcpConn as `writer`, except in -// testing it can accept a buffer as the fake writer. -func NewWriteStream(writer io.Writer) *WriteStream { - return &WriteStream{ - writer: writer, - } -} - -// invoke an asynchronous write for message. -func (s *WriteStream) Write(msgBytes []byte) error { - var err error - var total = 0 - var written = 0 - - toWrite := len(msgBytes) - - for total < toWrite && err == nil { - written, err = s.writer.Write(msgBytes[total:]) - total += written - } - - return err -} diff --git a/vendor/github.com/XiaoMi/pegasus-go-client/rpc/utils.go b/vendor/github.com/XiaoMi/pegasus-go-client/rpc/utils.go deleted file mode 100644 index 258f941..0000000 --- a/vendor/github.com/XiaoMi/pegasus-go-client/rpc/utils.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright (c) 2017, Xiaomi, Inc. All rights reserved. -// This source code is licensed under the Apache License Version 2.0, which -// can be found in the LICENSE file in the root directory of this source tree. - -package rpc - -import ( - "io" - "net" -) - -// IsNetworkTimeoutErr returns whether the given error is a timeout error. -func IsNetworkTimeoutErr(err error) bool { - // if it's a network timeout error - opErr, ok := err.(*net.OpError) - if ok { - return opErr.Timeout() - } - - return false -} - -// IsNetworkClosed returns whether the session is shutdown by the peer. -func IsNetworkClosed(err error) bool { - opErr, ok := err.(*net.OpError) - if ok { - return opErr.Err == io.EOF - } - - return err == io.EOF -} diff --git a/vendor/github.com/XiaoMi/pegasus-go-client/session/addr.go b/vendor/github.com/XiaoMi/pegasus-go-client/session/addr.go deleted file mode 100644 index f737865..0000000 --- a/vendor/github.com/XiaoMi/pegasus-go-client/session/addr.go +++ /dev/null @@ -1,37 +0,0 @@ -package session - -import ( - "fmt" - "net" -) - -// ResolveMetaAddr into a list of TCP4 addresses. Error is returned if the given `addrs` are not either -// a list of valid TCP4 addresses, or a resolvable hostname. -func ResolveMetaAddr(addrs []string) ([]string, error) { - if len(addrs) == 0 { - return nil, fmt.Errorf("meta server list should not be empty") - } - - // case#1: all addresses are in TCP4 already - allTCPAddr := true - for _, addr := range addrs { - _, err := net.ResolveTCPAddr("tcp4", addr) - if err != nil { - allTCPAddr = false - break - } - } - if allTCPAddr { - return addrs, nil - } - - // case#2: address is a hostname - if len(addrs) == 1 { - actualAddrs, err := net.LookupHost(addrs[0]) - if err == nil { - return actualAddrs, nil - } - } - - return nil, fmt.Errorf("illegal meta addresses: %s", addrs) -} diff --git a/vendor/github.com/XiaoMi/pegasus-go-client/session/admin_rpc_types.go b/vendor/github.com/XiaoMi/pegasus-go-client/session/admin_rpc_types.go deleted file mode 100644 index 4cf8bbc..0000000 --- a/vendor/github.com/XiaoMi/pegasus-go-client/session/admin_rpc_types.go +++ /dev/null @@ -1,360 +0,0 @@ -// Code generated by "generator -i=admin.csv > admin_rpc_types.go"; DO NOT EDIT. 
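`ResolveMetaAddr` above accepts either a list of `host:port` TCP addresses or a single resolvable hostname. A sketch mirroring that validation with the same standard-library calls (function name is mine):

```go
package main

import (
	"fmt"
	"net"
)

// resolveMetaAddrs keeps the list if every entry parses as a TCP4 address,
// otherwise falls back to a hostname lookup when exactly one entry is given.
func resolveMetaAddrs(addrs []string) ([]string, error) {
	if len(addrs) == 0 {
		return nil, fmt.Errorf("meta server list should not be empty")
	}
	allTCP := true
	for _, a := range addrs {
		if _, err := net.ResolveTCPAddr("tcp4", a); err != nil {
			allTCP = false
			break
		}
	}
	if allTCP {
		return addrs, nil
	}
	if len(addrs) == 1 {
		if hosts, err := net.LookupHost(addrs[0]); err == nil {
			return hosts, nil
		}
	}
	return nil, fmt.Errorf("illegal meta addresses: %s", addrs)
}

func main() {
	fmt.Println(resolveMetaAddrs([]string{"127.0.0.1:34601", "127.0.0.1:34602"}))
}
```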
-package session - -import ( - "context" - "fmt" - - "github.com/XiaoMi/pegasus-go-client/idl/admin" - "github.com/XiaoMi/pegasus-go-client/idl/base" -) - -func (ms *metaSession) dropApp(ctx context.Context, req *admin.DropAppRequest) (*admin.DropAppResponse, error) { - arg := admin.NewAdminClientDropAppArgs() - arg.Req = req - result, err := ms.call(ctx, arg, "RPC_CM_DROP_APP") - if err != nil { - return nil, fmt.Errorf("RPC to session %s failed: %s", ms, err) - } - ret, _ := result.(*admin.AdminClientDropAppResult) - return ret.GetSuccess(), nil -} - -// DropApp is auto-generated -func (m *MetaManager) DropApp(ctx context.Context, req *admin.DropAppRequest) (*admin.DropAppResponse, error) { - resp, err := m.call(ctx, func(rpcCtx context.Context, ms *metaSession) (metaResponse, error) { - return ms.dropApp(rpcCtx, req) - }) - if err == nil { - if resp.GetErr().Errno != base.ERR_OK.String() { - return resp.(*admin.DropAppResponse), fmt.Errorf("DropApp failed: %s", resp.GetErr().String()) - } - return resp.(*admin.DropAppResponse), nil - } - return nil, err -} - -func (ms *metaSession) createApp(ctx context.Context, req *admin.CreateAppRequest) (*admin.CreateAppResponse, error) { - arg := admin.NewAdminClientCreateAppArgs() - arg.Req = req - result, err := ms.call(ctx, arg, "RPC_CM_CREATE_APP") - if err != nil { - return nil, fmt.Errorf("RPC to session %s failed: %s", ms, err) - } - ret, _ := result.(*admin.AdminClientCreateAppResult) - return ret.GetSuccess(), nil -} - -// CreateApp is auto-generated -func (m *MetaManager) CreateApp(ctx context.Context, req *admin.CreateAppRequest) (*admin.CreateAppResponse, error) { - resp, err := m.call(ctx, func(rpcCtx context.Context, ms *metaSession) (metaResponse, error) { - return ms.createApp(rpcCtx, req) - }) - if err == nil { - if resp.GetErr().Errno != base.ERR_OK.String() { - return resp.(*admin.CreateAppResponse), fmt.Errorf("CreateApp failed: %s", resp.GetErr().String()) - } - return resp.(*admin.CreateAppResponse), nil - } - return nil, err -} - -func (ms *metaSession) recallApp(ctx context.Context, req *admin.RecallAppRequest) (*admin.RecallAppResponse, error) { - arg := admin.NewAdminClientRecallAppArgs() - arg.Req = req - result, err := ms.call(ctx, arg, "RPC_CM_RECALL_APP") - if err != nil { - return nil, fmt.Errorf("RPC to session %s failed: %s", ms, err) - } - ret, _ := result.(*admin.AdminClientRecallAppResult) - return ret.GetSuccess(), nil -} - -// RecallApp is auto-generated -func (m *MetaManager) RecallApp(ctx context.Context, req *admin.RecallAppRequest) (*admin.RecallAppResponse, error) { - resp, err := m.call(ctx, func(rpcCtx context.Context, ms *metaSession) (metaResponse, error) { - return ms.recallApp(rpcCtx, req) - }) - if err == nil { - if resp.GetErr().Errno != base.ERR_OK.String() { - return resp.(*admin.RecallAppResponse), fmt.Errorf("RecallApp failed: %s", resp.GetErr().String()) - } - return resp.(*admin.RecallAppResponse), nil - } - return nil, err -} - -func (ms *metaSession) listApps(ctx context.Context, req *admin.ListAppsRequest) (*admin.ListAppsResponse, error) { - arg := admin.NewAdminClientListAppsArgs() - arg.Req = req - result, err := ms.call(ctx, arg, "RPC_CM_LIST_APPS") - if err != nil { - return nil, fmt.Errorf("RPC to session %s failed: %s", ms, err) - } - ret, _ := result.(*admin.AdminClientListAppsResult) - return ret.GetSuccess(), nil -} - -// ListApps is auto-generated -func (m *MetaManager) ListApps(ctx context.Context, req *admin.ListAppsRequest) (*admin.ListAppsResponse, error) { - resp, err := 
m.call(ctx, func(rpcCtx context.Context, ms *metaSession) (metaResponse, error) { - return ms.listApps(rpcCtx, req) - }) - if err == nil { - if resp.GetErr().Errno != base.ERR_OK.String() { - return resp.(*admin.ListAppsResponse), fmt.Errorf("ListApps failed: %s", resp.GetErr().String()) - } - return resp.(*admin.ListAppsResponse), nil - } - return nil, err -} - -func (ms *metaSession) queryDuplication(ctx context.Context, req *admin.DuplicationQueryRequest) (*admin.DuplicationQueryResponse, error) { - arg := admin.NewAdminClientQueryDuplicationArgs() - arg.Req = req - result, err := ms.call(ctx, arg, "RPC_CM_QUERY_DUPLICATION") - if err != nil { - return nil, fmt.Errorf("RPC to session %s failed: %s", ms, err) - } - ret, _ := result.(*admin.AdminClientQueryDuplicationResult) - return ret.GetSuccess(), nil -} - -// QueryDuplication is auto-generated -func (m *MetaManager) QueryDuplication(ctx context.Context, req *admin.DuplicationQueryRequest) (*admin.DuplicationQueryResponse, error) { - resp, err := m.call(ctx, func(rpcCtx context.Context, ms *metaSession) (metaResponse, error) { - return ms.queryDuplication(rpcCtx, req) - }) - if err == nil { - if resp.GetErr().Errno != base.ERR_OK.String() { - return resp.(*admin.DuplicationQueryResponse), fmt.Errorf("QueryDuplication failed: %s", resp.GetErr().String()) - } - return resp.(*admin.DuplicationQueryResponse), nil - } - return nil, err -} - -func (ms *metaSession) modifyDuplication(ctx context.Context, req *admin.DuplicationModifyRequest) (*admin.DuplicationModifyResponse, error) { - arg := admin.NewAdminClientModifyDuplicationArgs() - arg.Req = req - result, err := ms.call(ctx, arg, "RPC_CM_MODIFY_DUPLICATION") - if err != nil { - return nil, fmt.Errorf("RPC to session %s failed: %s", ms, err) - } - ret, _ := result.(*admin.AdminClientModifyDuplicationResult) - return ret.GetSuccess(), nil -} - -// ModifyDuplication is auto-generated -func (m *MetaManager) ModifyDuplication(ctx context.Context, req *admin.DuplicationModifyRequest) (*admin.DuplicationModifyResponse, error) { - resp, err := m.call(ctx, func(rpcCtx context.Context, ms *metaSession) (metaResponse, error) { - return ms.modifyDuplication(rpcCtx, req) - }) - if err == nil { - if resp.GetErr().Errno != base.ERR_OK.String() { - return resp.(*admin.DuplicationModifyResponse), fmt.Errorf("ModifyDuplication failed: %s", resp.GetErr().String()) - } - return resp.(*admin.DuplicationModifyResponse), nil - } - return nil, err -} - -func (ms *metaSession) addDuplication(ctx context.Context, req *admin.DuplicationAddRequest) (*admin.DuplicationAddResponse, error) { - arg := admin.NewAdminClientAddDuplicationArgs() - arg.Req = req - result, err := ms.call(ctx, arg, "RPC_CM_ADD_DUPLICATION") - if err != nil { - return nil, fmt.Errorf("RPC to session %s failed: %s", ms, err) - } - ret, _ := result.(*admin.AdminClientAddDuplicationResult) - return ret.GetSuccess(), nil -} - -// AddDuplication is auto-generated -func (m *MetaManager) AddDuplication(ctx context.Context, req *admin.DuplicationAddRequest) (*admin.DuplicationAddResponse, error) { - resp, err := m.call(ctx, func(rpcCtx context.Context, ms *metaSession) (metaResponse, error) { - return ms.addDuplication(rpcCtx, req) - }) - if err == nil { - if resp.GetErr().Errno != base.ERR_OK.String() { - return resp.(*admin.DuplicationAddResponse), fmt.Errorf("AddDuplication failed: %s", resp.GetErr().String()) - } - return resp.(*admin.DuplicationAddResponse), nil - } - return nil, err -} - -func (ms *metaSession) queryAppInfo(ctx 
context.Context, req *admin.QueryAppInfoRequest) (*admin.QueryAppInfoResponse, error) { - arg := admin.NewAdminClientQueryAppInfoArgs() - arg.Req = req - result, err := ms.call(ctx, arg, "RPC_QUERY_APP_INFO") - if err != nil { - return nil, fmt.Errorf("RPC to session %s failed: %s", ms, err) - } - ret, _ := result.(*admin.AdminClientQueryAppInfoResult) - return ret.GetSuccess(), nil -} - -// QueryAppInfo is auto-generated -func (m *MetaManager) QueryAppInfo(ctx context.Context, req *admin.QueryAppInfoRequest) (*admin.QueryAppInfoResponse, error) { - resp, err := m.call(ctx, func(rpcCtx context.Context, ms *metaSession) (metaResponse, error) { - return ms.queryAppInfo(rpcCtx, req) - }) - if err == nil { - if resp.GetErr().Errno != base.ERR_OK.String() { - return resp.(*admin.QueryAppInfoResponse), fmt.Errorf("QueryAppInfo failed: %s", resp.GetErr().String()) - } - return resp.(*admin.QueryAppInfoResponse), nil - } - return nil, err -} - -func (ms *metaSession) updateAppEnv(ctx context.Context, req *admin.UpdateAppEnvRequest) (*admin.UpdateAppEnvResponse, error) { - arg := admin.NewAdminClientUpdateAppEnvArgs() - arg.Req = req - result, err := ms.call(ctx, arg, "RPC_CM_UPDATE_APP_ENV") - if err != nil { - return nil, fmt.Errorf("RPC to session %s failed: %s", ms, err) - } - ret, _ := result.(*admin.AdminClientUpdateAppEnvResult) - return ret.GetSuccess(), nil -} - -// UpdateAppEnv is auto-generated -func (m *MetaManager) UpdateAppEnv(ctx context.Context, req *admin.UpdateAppEnvRequest) (*admin.UpdateAppEnvResponse, error) { - resp, err := m.call(ctx, func(rpcCtx context.Context, ms *metaSession) (metaResponse, error) { - return ms.updateAppEnv(rpcCtx, req) - }) - if err == nil { - if resp.GetErr().Errno != base.ERR_OK.String() { - return resp.(*admin.UpdateAppEnvResponse), fmt.Errorf("UpdateAppEnv failed: %s", resp.GetErr().String()) - } - return resp.(*admin.UpdateAppEnvResponse), nil - } - return nil, err -} - -func (ms *metaSession) listNodes(ctx context.Context, req *admin.ListNodesRequest) (*admin.ListNodesResponse, error) { - arg := admin.NewAdminClientListNodesArgs() - arg.Req = req - result, err := ms.call(ctx, arg, "RPC_CM_LIST_NODES") - if err != nil { - return nil, fmt.Errorf("RPC to session %s failed: %s", ms, err) - } - ret, _ := result.(*admin.AdminClientListNodesResult) - return ret.GetSuccess(), nil -} - -// ListNodes is auto-generated -func (m *MetaManager) ListNodes(ctx context.Context, req *admin.ListNodesRequest) (*admin.ListNodesResponse, error) { - resp, err := m.call(ctx, func(rpcCtx context.Context, ms *metaSession) (metaResponse, error) { - return ms.listNodes(rpcCtx, req) - }) - if err == nil { - if resp.GetErr().Errno != base.ERR_OK.String() { - return resp.(*admin.ListNodesResponse), fmt.Errorf("ListNodes failed: %s", resp.GetErr().String()) - } - return resp.(*admin.ListNodesResponse), nil - } - return nil, err -} - -func (ms *metaSession) queryClusterInfo(ctx context.Context, req *admin.ClusterInfoRequest) (*admin.ClusterInfoResponse, error) { - arg := admin.NewAdminClientQueryClusterInfoArgs() - arg.Req = req - result, err := ms.call(ctx, arg, "RPC_CM_CLUSTER_INFO") - if err != nil { - return nil, fmt.Errorf("RPC to session %s failed: %s", ms, err) - } - ret, _ := result.(*admin.AdminClientQueryClusterInfoResult) - return ret.GetSuccess(), nil -} - -// QueryClusterInfo is auto-generated -func (m *MetaManager) QueryClusterInfo(ctx context.Context, req *admin.ClusterInfoRequest) (*admin.ClusterInfoResponse, error) { - resp, err := m.call(ctx, func(rpcCtx 
context.Context, ms *metaSession) (metaResponse, error) { - return ms.queryClusterInfo(rpcCtx, req) - }) - if err == nil { - if resp.GetErr().Errno != base.ERR_OK.String() { - return resp.(*admin.ClusterInfoResponse), fmt.Errorf("QueryClusterInfo failed: %s", resp.GetErr().String()) - } - return resp.(*admin.ClusterInfoResponse), nil - } - return nil, err -} - -func (ms *metaSession) metaControl(ctx context.Context, req *admin.MetaControlRequest) (*admin.MetaControlResponse, error) { - arg := admin.NewAdminClientMetaControlArgs() - arg.Req = req - result, err := ms.call(ctx, arg, "RPC_CM_CONTROL_META") - if err != nil { - return nil, fmt.Errorf("RPC to session %s failed: %s", ms, err) - } - ret, _ := result.(*admin.AdminClientMetaControlResult) - return ret.GetSuccess(), nil -} - -// MetaControl is auto-generated -func (m *MetaManager) MetaControl(ctx context.Context, req *admin.MetaControlRequest) (*admin.MetaControlResponse, error) { - resp, err := m.call(ctx, func(rpcCtx context.Context, ms *metaSession) (metaResponse, error) { - return ms.metaControl(rpcCtx, req) - }) - if err == nil { - if resp.GetErr().Errno != base.ERR_OK.String() { - return resp.(*admin.MetaControlResponse), fmt.Errorf("MetaControl failed: %s", resp.GetErr().String()) - } - return resp.(*admin.MetaControlResponse), nil - } - return nil, err -} - -func (ms *metaSession) queryBackupPolicy(ctx context.Context, req *admin.QueryBackupPolicyRequest) (*admin.QueryBackupPolicyResponse, error) { - arg := admin.NewAdminClientQueryBackupPolicyArgs() - arg.Req = req - result, err := ms.call(ctx, arg, "RPC_CM_QUERY_BACKUP_POLICY") - if err != nil { - return nil, fmt.Errorf("RPC to session %s failed: %s", ms, err) - } - ret, _ := result.(*admin.AdminClientQueryBackupPolicyResult) - return ret.GetSuccess(), nil -} - -// QueryBackupPolicy is auto-generated -func (m *MetaManager) QueryBackupPolicy(ctx context.Context, req *admin.QueryBackupPolicyRequest) (*admin.QueryBackupPolicyResponse, error) { - resp, err := m.call(ctx, func(rpcCtx context.Context, ms *metaSession) (metaResponse, error) { - return ms.queryBackupPolicy(rpcCtx, req) - }) - if err == nil { - if resp.GetErr().Errno != base.ERR_OK.String() { - return resp.(*admin.QueryBackupPolicyResponse), fmt.Errorf("QueryBackupPolicy failed: %s", resp.GetErr().String()) - } - return resp.(*admin.QueryBackupPolicyResponse), nil - } - return nil, err -} - -func (ms *metaSession) balance(ctx context.Context, req *admin.BalanceRequest) (*admin.BalanceResponse, error) { - arg := admin.NewAdminClientBalanceArgs() - arg.Req = req - result, err := ms.call(ctx, arg, "RPC_CM_PROPOSE_BALANCER") - if err != nil { - return nil, fmt.Errorf("RPC to session %s failed: %s", ms, err) - } - ret, _ := result.(*admin.AdminClientBalanceResult) - return ret.GetSuccess(), nil -} - -// Balance is auto-generated -func (m *MetaManager) Balance(ctx context.Context, req *admin.BalanceRequest) (*admin.BalanceResponse, error) { - resp, err := m.call(ctx, func(rpcCtx context.Context, ms *metaSession) (metaResponse, error) { - return ms.balance(rpcCtx, req) - }) - if err == nil { - if resp.GetErr().Errno != base.ERR_OK.String() { - return resp.(*admin.BalanceResponse), fmt.Errorf("Balance failed: %s", resp.GetErr().String()) - } - return resp.(*admin.BalanceResponse), nil - } - return nil, err -} diff --git a/vendor/github.com/XiaoMi/pegasus-go-client/session/codec.go b/vendor/github.com/XiaoMi/pegasus-go-client/session/codec.go deleted file mode 100644 index 71f1838..0000000 --- 
a/vendor/github.com/XiaoMi/pegasus-go-client/session/codec.go +++ /dev/null @@ -1,410 +0,0 @@ -// Copyright (c) 2017, Xiaomi, Inc. All rights reserved. -// This source code is licensed under the Apache License Version 2.0, which -// can be found in the LICENSE file in the root directory of this source tree. - -package session - -import ( - "bytes" - "encoding/binary" - "fmt" - "sync" - "time" - - "github.com/XiaoMi/pegasus-go-client/idl/admin" - "github.com/XiaoMi/pegasus-go-client/idl/base" - "github.com/XiaoMi/pegasus-go-client/idl/cmd" - "github.com/XiaoMi/pegasus-go-client/idl/radmin" - "github.com/XiaoMi/pegasus-go-client/idl/replication" - "github.com/XiaoMi/pegasus-go-client/idl/rrdb" - "github.com/XiaoMi/pegasus-go-client/pegalog" - "github.com/XiaoMi/pegasus-go-client/rpc" - "github.com/pegasus-kv/thrift/lib/go/thrift" -) - -type PegasusCodec struct { - logger pegalog.Logger -} - -func NewPegasusCodec() *PegasusCodec { - return &PegasusCodec{logger: pegalog.GetLogger()} -} - -func (p *PegasusCodec) Marshal(v interface{}) ([]byte, error) { - r, _ := v.(*PegasusRpcCall) - - header := &thriftHeader{ - headerLength: uint32(thriftHeaderBytesLen), - appId: r.Gpid.Appid, - partitionIndex: r.Gpid.PartitionIndex, - threadHash: gpidToThreadHash(r.Gpid), - partitionHash: 0, - } - - // skip the first ThriftHeaderBytesLen bytes - buf := thrift.NewTMemoryBuffer() - buf.Write(make([]byte, thriftHeaderBytesLen)) - - // encode body into buffer - oprot := thrift.NewTBinaryProtocolTransport(buf) - - var err error - if err = oprot.WriteMessageBegin(r.Name, thrift.CALL, r.SeqId); err != nil { - return nil, err - } - if err = r.Args.Write(oprot); err != nil { - return nil, err - } - if err = oprot.WriteMessageEnd(); err != nil { - return nil, err - } - - // encode header into buffer - header.bodyLength = uint32(buf.Len() - thriftHeaderBytesLen) - header.marshall(buf.Bytes()[0:thriftHeaderBytesLen]) - - return buf.Bytes(), nil -} - -func (p *PegasusCodec) Unmarshal(data []byte, v interface{}) error { - r, _ := v.(*PegasusRpcCall) - - iprot := thrift.NewTBinaryProtocolTransport(thrift.NewStreamTransportR(bytes.NewBuffer(data))) - ec := &base.ErrorCode{} - if err := ec.Read(iprot); err != nil { - return err - } - - name, _, seqId, err := iprot.ReadMessageBegin() - if err != nil { - return err - } - - r.Name = name - r.SeqId = seqId - - if ec.Errno != base.ERR_OK.String() { - // convert string to base.DsnErrCode - err, parseErr := base.DsnErrCodeString(ec.Errno) - if parseErr != nil { - p.logger.Print("failed to unmarshal the heading error code of rpc response: ", parseErr) - return parseErr - } - - r.Err = err - return nil - } - - nameToResultFunc, ok := nameToResultMap[name] - if !ok { - return fmt.Errorf("failed to find rpc name: %s", name) - } - r.Result = nameToResultFunc() - - // read response body - if err = r.Result.Read(iprot); err != nil { - return err - } - if err = iprot.ReadMessageEnd(); err != nil { - return err - } - - return nil -} - -func (p *PegasusCodec) String() string { - return "pegasus" -} - -// RegisterRPCResultHandler registers an external RPC that's not including in -// pegasus-go-client. -// -// The following example registers an response handler for Pegasus's remote-command RPC. 
-// Usage: -// -// ```go -// RegisterRpcResultHandler("RPC_CLI_CLI_CALL_ACK", func() RpcResponseResult { -// return &RemoteCmdServiceCallCommandResult{Success: new(string)} -// }) -// ``` -func RegisterRPCResultHandler(responseAck string, handler func() RpcResponseResult) { - nameToResultMapLock.Lock() - defer nameToResultMapLock.Unlock() - _, found := nameToResultMap[responseAck] - if found { - panic(fmt.Sprintf("register an registered RPC result handler: %s", responseAck)) - } else { - nameToResultMap[responseAck] = handler - } -} - -var nameToResultMapLock sync.Mutex -var nameToResultMap = map[string]func() RpcResponseResult{ - "RPC_CM_QUERY_PARTITION_CONFIG_BY_INDEX_ACK": func() RpcResponseResult { - return &rrdb.MetaQueryCfgResult{ - Success: replication.NewQueryCfgResponse(), - } - }, - "RPC_CM_CREATE_APP_ACK": func() RpcResponseResult { - return &admin.AdminClientCreateAppResult{ - Success: admin.NewCreateAppResponse(), - } - }, - "RPC_CM_DROP_APP_ACK": func() RpcResponseResult { - return &admin.AdminClientDropAppResult{ - Success: admin.NewDropAppResponse(), - } - }, - "RPC_CM_RECALL_APP_ACK": func() RpcResponseResult { - return &admin.AdminClientRecallAppResult{ - Success: admin.NewRecallAppResponse(), - } - }, - "RPC_CM_LIST_APPS_ACK": func() RpcResponseResult { - return &admin.AdminClientListAppsResult{ - Success: admin.NewListAppsResponse(), - } - }, - "RPC_QUERY_APP_INFO_ACK": func() RpcResponseResult { - return &admin.AdminClientQueryAppInfoResult{ - Success: admin.NewQueryAppInfoResponse(), - } - }, - "RPC_CM_UPDATE_APP_ENV_ACK": func() RpcResponseResult { - return &admin.AdminClientUpdateAppEnvResult{ - Success: admin.NewUpdateAppEnvResponse(), - } - }, - "RPC_CM_QUERY_DUPLICATION_ACK": func() RpcResponseResult { - return &admin.AdminClientQueryDuplicationResult{ - Success: admin.NewDuplicationQueryResponse(), - } - }, - "RPC_CM_MODIFY_DUPLICATION_ACK": func() RpcResponseResult { - return &admin.AdminClientModifyDuplicationResult{ - Success: admin.NewDuplicationModifyResponse(), - } - }, - "RPC_CM_ADD_DUPLICATION_ACK": func() RpcResponseResult { - return &admin.AdminClientAddDuplicationResult{ - Success: admin.NewDuplicationAddResponse(), - } - }, - "RPC_CM_QUERY_BACKUP_POLICY_ACK": func() RpcResponseResult { - return &admin.AdminClientQueryBackupPolicyResult{ - Success: admin.NewQueryBackupPolicyResponse(), - } - }, - "RPC_CM_CLUSTER_INFO_ACK": func() RpcResponseResult { - return &admin.AdminClientQueryClusterInfoResult{ - Success: admin.NewClusterInfoResponse(), - } - }, - "RPC_CM_CONTROL_META_ACK": func() RpcResponseResult { - return &admin.AdminClientMetaControlResult{ - Success: admin.NewMetaControlResponse(), - } - }, - "RPC_CM_LIST_NODES_ACK": func() RpcResponseResult { - return &admin.AdminClientListNodesResult{ - Success: admin.NewListNodesResponse(), - } - }, - "RPC_CM_PROPOSE_BALANCER_ACK": func() RpcResponseResult { - return &admin.AdminClientBalanceResult{ - Success: admin.NewBalanceResponse(), - } - }, - "RPC_QUERY_DISK_INFO_ACK": func() RpcResponseResult { - return &radmin.ReplicaClientQueryDiskInfoResult{ - Success: radmin.NewQueryDiskInfoResponse(), - } - }, - "RPC_REPLICA_DISK_MIGRATE_ACK": func() RpcResponseResult { - return &radmin.ReplicaClientDiskMigrateResult{ - Success: radmin.NewReplicaDiskMigrateResponse(), - } - }, - "RPC_RRDB_RRDB_GET_ACK": func() RpcResponseResult { - return &rrdb.RrdbGetResult{ - Success: rrdb.NewReadResponse(), - } - }, - "RPC_RRDB_RRDB_PUT_ACK": func() RpcResponseResult { - return &rrdb.RrdbPutResult{ - Success: 
rrdb.NewUpdateResponse(), - } - }, - "RPC_RRDB_RRDB_REMOVE_ACK": func() RpcResponseResult { - return &rrdb.RrdbRemoveResult{ - Success: rrdb.NewUpdateResponse(), - } - }, - "RPC_RRDB_RRDB_MULTI_GET_ACK": func() RpcResponseResult { - return &rrdb.RrdbMultiGetResult{ - Success: rrdb.NewMultiGetResponse(), - } - }, - "RPC_RRDB_RRDB_MULTI_REMOVE_ACK": func() RpcResponseResult { - return &rrdb.RrdbMultiRemoveResult{ - Success: rrdb.NewMultiRemoveResponse(), - } - }, - "RPC_RRDB_RRDB_MULTI_PUT_ACK": func() RpcResponseResult { - return &rrdb.RrdbMultiPutResult{ - Success: rrdb.NewUpdateResponse(), - } - }, - "RPC_RRDB_RRDB_TTL_ACK": func() RpcResponseResult { - return &rrdb.RrdbTTLResult{ - Success: rrdb.NewTTLResponse(), - } - }, - "RPC_RRDB_RRDB_GET_SCANNER_ACK": func() RpcResponseResult { - return &rrdb.RrdbGetScannerResult{ - Success: rrdb.NewScanResponse(), - } - }, - "RPC_RRDB_RRDB_SCAN_ACK": func() RpcResponseResult { - return &rrdb.RrdbScanResult{ - Success: rrdb.NewScanResponse(), - } - }, - "RPC_RRDB_RRDB_CHECK_AND_SET_ACK": func() RpcResponseResult { - return &rrdb.RrdbCheckAndSetResult{ - Success: rrdb.NewCheckAndSetResponse(), - } - }, - "RPC_RRDB_RRDB_SORTKEY_COUNT_ACK": func() RpcResponseResult { - return &rrdb.RrdbSortkeyCountResult{ - Success: rrdb.NewCountResponse(), - } - }, - "RPC_RRDB_RRDB_INCR_ACK": func() RpcResponseResult { - return &rrdb.RrdbIncrResult{ - Success: rrdb.NewIncrResponse(), - } - }, - "RPC_CLI_CLI_CALL_ACK": func() RpcResponseResult { - return &cmd.RemoteCmdServiceCallCommandResult{ - Success: new(string), - } - }, -} - -// MockCodec is only used for testing. -// By default it does nothing on marshalling and unmarshalling, -// thus it returns no error even if the input was ill-formed. -type MockCodec struct { - mars MarshalFunc - unmars UnmarshalFunc -} - -type UnmarshalFunc func(data []byte, v interface{}) error - -type MarshalFunc func(v interface{}) ([]byte, error) - -func (p *MockCodec) Marshal(v interface{}) ([]byte, error) { - if p.mars != nil { - return p.mars(v) - } - return nil, nil -} - -func (p *MockCodec) Unmarshal(data []byte, v interface{}) error { - if p.unmars != nil { - return p.unmars(data, v) - } - return nil -} - -func (p *MockCodec) String() string { - return "mock" -} - -func (p *MockCodec) MockMarshal(marshal MarshalFunc) { - p.mars = marshal -} - -func (p *MockCodec) MockUnMarshal(unmarshal UnmarshalFunc) { - p.unmars = unmarshal -} - -// a trait of the thrift-generated argument type (MetaQueryCfgArgs, RrdbPutArgs e.g.) -type RpcRequestArgs interface { - String() string - Write(oprot thrift.TProtocol) error -} - -// a trait of the thrift-generated result type (MetaQueryCfgResult e.g.) 
-type RpcResponseResult interface { - String() string - Read(iprot thrift.TProtocol) error -} - -type PegasusRpcCall struct { - Args RpcRequestArgs - Result RpcResponseResult - Name string // the rpc's name - SeqId int32 - Gpid *base.Gpid - RawReq []byte // the marshalled request in bytes - Err error - - // hooks on each stage during rpc processing - OnRpcCall time.Time - OnRpcSend time.Time - OnRpcRecv time.Time -} - -func (call *PegasusRpcCall) Trace() string { - return fmt.Sprintf("call->%dus->send->%dus->recv->%dus->now", - call.OnRpcSend.Sub(call.OnRpcCall)/time.Microsecond, - call.OnRpcRecv.Sub(call.OnRpcSend)/time.Microsecond, - time.Since(call.OnRpcRecv)/time.Microsecond) -} - -func (call *PegasusRpcCall) TilNow() time.Duration { - return time.Since(call.OnRpcCall) -} - -func MarshallPegasusRpc(codec rpc.Codec, seqId int32, gpid *base.Gpid, args RpcRequestArgs, name string) (*PegasusRpcCall, error) { - rcall := &PegasusRpcCall{} - rcall.Args = args - rcall.Name = name - rcall.SeqId = seqId - rcall.Gpid = gpid - - var err error - rcall.RawReq, err = codec.Marshal(rcall) - if err != nil { - return nil, err - } - return rcall, nil -} - -func ReadRpcResponse(conn *rpc.RpcConn, codec rpc.Codec) (*PegasusRpcCall, error) { - // read length field - lenBuf, err := conn.Read(4) - if err != nil && len(lenBuf) < 4 { - return nil, err - } - resplen := binary.BigEndian.Uint32(lenBuf) - if resplen < 4 { - return nil, fmt.Errorf("response length(%d) smaller than 4 bytes", resplen) - } - resplen -= 4 // 4 bytes for length - - // read data field - buf, err := conn.Read(int(resplen)) - if err != nil || len(buf) != int(resplen) { - return nil, err - } - - r := &PegasusRpcCall{} - if err := codec.Unmarshal(buf, r); err != nil { - return nil, err - } - - return r, nil -} diff --git a/vendor/github.com/XiaoMi/pegasus-go-client/session/header.go b/vendor/github.com/XiaoMi/pegasus-go-client/session/header.go deleted file mode 100644 index 7787729..0000000 --- a/vendor/github.com/XiaoMi/pegasus-go-client/session/header.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright (c) 2017, Xiaomi, Inc. All rights reserved. -// This source code is licensed under the Apache License Version 2.0, which -// can be found in the LICENSE file in the root directory of this source tree. - -package session - -import ( - "encoding/binary" - "fmt" - - "github.com/XiaoMi/pegasus-go-client/idl/base" -) - -var thriftHeaderTypeStr = []byte{'T', 'H', 'F', 'T'} -var thriftHeaderBytesLen = 48 - -// thriftHeader stores the meta information of a particular RPC -type thriftHeader struct { - headerVersion uint32 - headerLength uint32 - headerCrc32 uint32 - bodyLength uint32 - bodyCrc32 uint32 - appId int32 - partitionIndex int32 - clientTimeout uint32 - threadHash int32 - partitionHash uint32 -} - -// Serialized this struct as the message header in pegasus messaging protocol. 
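`ReadRpcResponse` above reads Pegasus responses as length-prefixed frames: a 4-byte big-endian total length (which includes the length field itself) followed by the payload. A standalone sketch of that framing (helper name is mine):

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
)

// readFrame reads one [uint32 BE total length][payload] frame, where the
// length covers its own 4 bytes, as in ReadRpcResponse.
func readFrame(r io.Reader) ([]byte, error) {
	var header [4]byte
	if _, err := io.ReadFull(r, header[:]); err != nil {
		return nil, err
	}
	total := binary.BigEndian.Uint32(header[:])
	if total < 4 {
		return nil, fmt.Errorf("response length(%d) smaller than 4 bytes", total)
	}
	payload := make([]byte, total-4)
	if _, err := io.ReadFull(r, payload); err != nil {
		return nil, err
	}
	return payload, nil
}

func main() {
	var buf bytes.Buffer
	body := []byte("thrift-encoded-response")
	binary.Write(&buf, binary.BigEndian, uint32(4+len(body)))
	buf.Write(body)

	payload, err := readFrame(&buf)
	fmt.Printf("%q %v\n", payload, err)
}
```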
-// (See https://github.com/XiaoMi/pegasus/blob/master/docs/client-development.md) -func (t *thriftHeader) marshall(buf []byte) { - if len(buf) != thriftHeaderBytesLen { - panic(fmt.Sprintf("length of buf(%d) should be %d", len(buf), thriftHeaderBytesLen)) - } - - copy(buf[0:4], thriftHeaderTypeStr) - binary.BigEndian.PutUint32(buf[4:8], t.headerVersion) - binary.BigEndian.PutUint32(buf[8:12], t.headerLength) - binary.BigEndian.PutUint32(buf[12:16], t.headerCrc32) - binary.BigEndian.PutUint32(buf[16:20], t.bodyLength) - binary.BigEndian.PutUint32(buf[20:24], t.bodyCrc32) - binary.BigEndian.PutUint32(buf[24:28], uint32(t.appId)) - binary.BigEndian.PutUint32(buf[28:32], uint32(t.partitionIndex)) - binary.BigEndian.PutUint32(buf[32:36], t.clientTimeout) - binary.BigEndian.PutUint32(buf[36:40], uint32(t.threadHash)) - binary.BigEndian.PutUint32(buf[40:48], t.partitionHash) -} - -// Thread hash is a rDSN required header field. We copied the algorithm -// from java client. -func gpidToThreadHash(gpid *base.Gpid) int32 { - return gpid.Appid*7919 + gpid.PartitionIndex -} diff --git a/vendor/github.com/XiaoMi/pegasus-go-client/session/meta_call.go b/vendor/github.com/XiaoMi/pegasus-go-client/session/meta_call.go deleted file mode 100644 index cde7f7f..0000000 --- a/vendor/github.com/XiaoMi/pegasus-go-client/session/meta_call.go +++ /dev/null @@ -1,116 +0,0 @@ -package session - -import ( - "context" - "sync" - "sync/atomic" - "time" - - "github.com/XiaoMi/pegasus-go-client/idl/base" -) - -type metaCallFunc func(context.Context, *metaSession) (metaResponse, error) - -type metaResponse interface { - GetErr() *base.ErrorCode -} - -// metaCall encapsulates the leader switching of MetaServers. Each metaCall.Run represents -// a RPC call to the MetaServers. If during the process the leader meta changed, metaCall -// automatically switches to the new leader. -type metaCall struct { - respCh chan metaResponse - backupCh chan interface{} - callFunc metaCallFunc - - metas []*metaSession - lead int - // After a Run successfully ends, the current leader will be set in this field. - // If there is no meta failover, `newLead` equals to `lead`. - newLead uint32 -} - -func newMetaCall(lead int, metas []*metaSession, callFunc metaCallFunc) *metaCall { - return &metaCall{ - metas: metas, - lead: lead, - newLead: uint32(lead), - respCh: make(chan metaResponse), - callFunc: callFunc, - backupCh: make(chan interface{}), - } -} - -func (c *metaCall) Run(ctx context.Context) (metaResponse, error) { - // the subroutines will be cancelled when this call ends - subCtx, cancel := context.WithCancel(ctx) - wg := &sync.WaitGroup{} - wg.Add(2) // this waitgroup is used to ensure all goroutines exit after Run ends. - - go func() { - // issue RPC to leader - if !c.issueSingleMeta(subCtx, c.lead) { - select { - case <-subCtx.Done(): - case c.backupCh <- nil: - // after the leader failed, we immediately start another - // RPC to the backup. - } - } - wg.Done() - }() - - go func() { - // Automatically issue backup RPC after a period - // when the current leader is suspected unvailable. - select { - case <-time.After(1 * time.Second): // TODO(wutao): make it configurable - c.issueBackupMetas(subCtx) - case <-c.backupCh: - c.issueBackupMetas(subCtx) - case <-subCtx.Done(): - } - wg.Done() - }() - - // The result of meta query is always a context error, or success. 
- select { - case resp := <-c.respCh: - cancel() - wg.Wait() - return resp, nil - case <-ctx.Done(): - cancel() - wg.Wait() - return nil, ctx.Err() - } -} - -// issueSingleMeta returns false if we should try another meta -func (c *metaCall) issueSingleMeta(ctx context.Context, i int) bool { - meta := c.metas[i] - resp, err := c.callFunc(ctx, meta) - if err != nil || resp.GetErr().Errno == base.ERR_FORWARD_TO_OTHERS.String() { - return false - } - // the RPC succeeds, this meta becomes the new leader now. - atomic.StoreUint32(&c.newLead, uint32(i)) - select { - case <-ctx.Done(): - case c.respCh <- resp: - // notify the caller - } - return true -} - -func (c *metaCall) issueBackupMetas(ctx context.Context) { - for i := range c.metas { - if i == c.lead { - continue - } - // concurrently issue RPC to the rest of meta servers. - go func(idx int) { - c.issueSingleMeta(ctx, idx) - }(i) - } -} diff --git a/vendor/github.com/XiaoMi/pegasus-go-client/session/meta_session.go b/vendor/github.com/XiaoMi/pegasus-go-client/session/meta_session.go deleted file mode 100644 index b4e241d..0000000 --- a/vendor/github.com/XiaoMi/pegasus-go-client/session/meta_session.go +++ /dev/null @@ -1,130 +0,0 @@ -// Copyright (c) 2017, Xiaomi, Inc. All rights reserved. -// This source code is licensed under the Apache License Version 2.0, which -// can be found in the LICENSE file in the root directory of this source tree. - -package session - -import ( - "context" - "sync" - - "github.com/XiaoMi/pegasus-go-client/idl/base" - "github.com/XiaoMi/pegasus-go-client/idl/replication" - "github.com/XiaoMi/pegasus-go-client/idl/rrdb" - "github.com/XiaoMi/pegasus-go-client/pegalog" - kerrors "k8s.io/apimachinery/pkg/util/errors" -) - -// metaSession represents the network session between client and meta server. -type metaSession struct { - NodeSession - - logger pegalog.Logger -} - -func (ms *metaSession) call(ctx context.Context, args RpcRequestArgs, rpcName string) (RpcResponseResult, error) { - return ms.CallWithGpid(ctx, &base.Gpid{Appid: 0, PartitionIndex: 0}, args, rpcName) -} - -func (ms *metaSession) queryConfig(ctx context.Context, tableName string) (*replication.QueryCfgResponse, error) { - ms.logger.Printf("querying configuration of table(%s) from %s", tableName, ms) - - arg := rrdb.NewMetaQueryCfgArgs() - arg.Query = replication.NewQueryCfgRequest() - arg.Query.AppName = tableName - arg.Query.PartitionIndices = []int32{} - - result, err := ms.call(ctx, arg, "RPC_CM_QUERY_PARTITION_CONFIG_BY_INDEX") - if err != nil { - ms.logger.Printf("failed to query configuration from %s: %s", ms, err) - return nil, err - } - - ret, _ := result.(*rrdb.MetaQueryCfgResult) - return ret.GetSuccess(), nil -} - -// MetaManager manages the list of metas, but only the leader will it request to. -// If the one is not the actual leader, it will retry with another. 
-type MetaManager struct { - logger pegalog.Logger - - metaIPAddrs []string - metas []*metaSession - currentLeader int // current leader of meta servers - - // protect access of currentLeader - mu sync.RWMutex -} - -// -func NewMetaManager(addrs []string, creator NodeSessionCreator) *MetaManager { - metas := make([]*metaSession, len(addrs)) - metaIPAddrs := make([]string, len(addrs)) - for i, addr := range addrs { - metas[i] = &metaSession{ - NodeSession: creator(addr, NodeTypeMeta), - logger: pegalog.GetLogger(), - } - metaIPAddrs[i] = addr - } - - mm := &MetaManager{ - currentLeader: 0, - metas: metas, - metaIPAddrs: metaIPAddrs, - logger: pegalog.GetLogger(), - } - return mm -} - -func (m *MetaManager) call(ctx context.Context, callFunc metaCallFunc) (metaResponse, error) { - lead := m.getCurrentLeader() - call := newMetaCall(lead, m.metas, callFunc) - resp, err := call.Run(ctx) - if err == nil { - m.setCurrentLeader(int(call.newLead)) - } - return resp, err -} - -// QueryConfig queries table configuration from the leader of meta servers. If the leader was changed, -// it retries for other servers until it finds the true leader, unless no leader exists. -// Thread-Safe -func (m *MetaManager) QueryConfig(ctx context.Context, tableName string) (*replication.QueryCfgResponse, error) { - m.logger.Printf("querying configuration of table(%s) [metaList=%s]", tableName, m.metaIPAddrs) - resp, err := m.call(ctx, func(rpcCtx context.Context, ms *metaSession) (metaResponse, error) { - return ms.queryConfig(rpcCtx, tableName) - }) - if err == nil { - queryCfgResp := resp.(*replication.QueryCfgResponse) - return queryCfgResp, nil - } - return nil, err -} - -func (m *MetaManager) getCurrentLeader() int { - m.mu.RLock() - defer m.mu.RUnlock() - - return m.currentLeader -} - -func (m *MetaManager) setCurrentLeader(lead int) { - m.mu.Lock() - defer m.mu.Unlock() - - m.currentLeader = lead -} - -// Close the sessions. -func (m *MetaManager) Close() error { - funcs := make([]func() error, len(m.metas)) - for i := 0; i < len(m.metas); i++ { - idx := i - funcs[idx] = func() error { - return m.metas[idx].Close() - } - } - return kerrors.AggregateGoroutines(funcs...) -} diff --git a/vendor/github.com/XiaoMi/pegasus-go-client/session/radmin_rpc_types.go b/vendor/github.com/XiaoMi/pegasus-go-client/session/radmin_rpc_types.go deleted file mode 100644 index 7319032..0000000 --- a/vendor/github.com/XiaoMi/pegasus-go-client/session/radmin_rpc_types.go +++ /dev/null @@ -1,42 +0,0 @@ -// Code generated by "generator -i=radmin.csv > radmin_rpc_types.go"; DO NOT EDIT. 
-package session - -import ( - "context" - "fmt" - - "github.com/XiaoMi/pegasus-go-client/idl/base" - "github.com/XiaoMi/pegasus-go-client/idl/radmin" -) - -// QueryDiskInfo is auto-generated -func (rs *ReplicaSession) QueryDiskInfo(ctx context.Context, req *radmin.QueryDiskInfoRequest) (*radmin.QueryDiskInfoResponse, error) { - arg := radmin.NewReplicaClientQueryDiskInfoArgs() - arg.Req = req - result, err := rs.CallWithGpid(ctx, &base.Gpid{Appid: 0, PartitionIndex: 0}, arg, "RPC_QUERY_DISK_INFO") - if err == nil { - ret, _ := result.(*radmin.ReplicaClientQueryDiskInfoResult) - resp := ret.GetSuccess() - if resp.GetErr().Errno != base.ERR_OK.String() { - return resp, fmt.Errorf("QueryDiskInfo to session %s failed: %s", rs, resp.GetErr().String()) - } - return resp, nil - } - return nil, err -} - -// DiskMigrate is auto-generated -func (rs *ReplicaSession) DiskMigrate(ctx context.Context, req *radmin.ReplicaDiskMigrateRequest) (*radmin.ReplicaDiskMigrateResponse, error) { - arg := radmin.NewReplicaClientDiskMigrateArgs() - arg.Req = req - result, err := rs.CallWithGpid(ctx, &base.Gpid{Appid: 0, PartitionIndex: 0}, arg, "RPC_REPLICA_DISK_MIGRATE") - if err == nil { - ret, _ := result.(*radmin.ReplicaClientDiskMigrateResult) - resp := ret.GetSuccess() - if resp.GetErr().Errno != base.ERR_OK.String() { - return resp, fmt.Errorf("DiskMigrate to session %s failed: %s", rs, resp.GetErr().String()) - } - return resp, nil - } - return nil, err -} diff --git a/vendor/github.com/XiaoMi/pegasus-go-client/session/replica_session.go b/vendor/github.com/XiaoMi/pegasus-go-client/session/replica_session.go deleted file mode 100644 index 4d6f45f..0000000 --- a/vendor/github.com/XiaoMi/pegasus-go-client/session/replica_session.go +++ /dev/null @@ -1,227 +0,0 @@ -// Copyright (c) 2017, Xiaomi, Inc. All rights reserved. -// This source code is licensed under the Apache License Version 2.0, which -// can be found in the LICENSE file in the root directory of this source tree. - -package session - -import ( - "context" - "sync" - - "github.com/XiaoMi/pegasus-go-client/idl/base" - "github.com/XiaoMi/pegasus-go-client/idl/rrdb" - kerrors "k8s.io/apimachinery/pkg/util/errors" -) - -// ReplicaSession represents the network session between client and -// replica server. 
-type ReplicaSession struct { - NodeSession -} - -func (rs *ReplicaSession) Get(ctx context.Context, gpid *base.Gpid, key *base.Blob) (*rrdb.ReadResponse, error) { - args := &rrdb.RrdbGetArgs{Key: key} - result, err := rs.CallWithGpid(ctx, gpid, args, "RPC_RRDB_RRDB_GET") - if err != nil { - return nil, err - } - - ret, _ := result.(*rrdb.RrdbGetResult) - return ret.GetSuccess(), nil -} - -func (rs *ReplicaSession) Put(ctx context.Context, gpid *base.Gpid, update *rrdb.UpdateRequest) (*rrdb.UpdateResponse, error) { - args := &rrdb.RrdbPutArgs{Update: update} - - result, err := rs.CallWithGpid(ctx, gpid, args, "RPC_RRDB_RRDB_PUT") - if err != nil { - return nil, err - } - - ret, _ := result.(*rrdb.RrdbPutResult) - return ret.GetSuccess(), nil -} - -func (rs *ReplicaSession) Del(ctx context.Context, gpid *base.Gpid, key *base.Blob) (*rrdb.UpdateResponse, error) { - args := &rrdb.RrdbRemoveArgs{Key: key} - result, err := rs.CallWithGpid(ctx, gpid, args, "RPC_RRDB_RRDB_REMOVE") - if err != nil { - return nil, err - } - - ret, _ := result.(*rrdb.RrdbRemoveResult) - return ret.GetSuccess(), nil -} - -func (rs *ReplicaSession) MultiGet(ctx context.Context, gpid *base.Gpid, request *rrdb.MultiGetRequest) (*rrdb.MultiGetResponse, error) { - args := &rrdb.RrdbMultiGetArgs{Request: request} - result, err := rs.CallWithGpid(ctx, gpid, args, "RPC_RRDB_RRDB_MULTI_GET") - if err != nil { - return nil, err - } - - ret, _ := result.(*rrdb.RrdbMultiGetResult) - return ret.GetSuccess(), nil -} - -func (rs *ReplicaSession) MultiSet(ctx context.Context, gpid *base.Gpid, request *rrdb.MultiPutRequest) (*rrdb.UpdateResponse, error) { - args := &rrdb.RrdbMultiPutArgs{Request: request} - result, err := rs.CallWithGpid(ctx, gpid, args, "RPC_RRDB_RRDB_MULTI_PUT") - if err != nil { - return nil, err - } - - ret, _ := result.(*rrdb.RrdbMultiPutResult) - return ret.GetSuccess(), nil -} - -func (rs *ReplicaSession) MultiDelete(ctx context.Context, gpid *base.Gpid, request *rrdb.MultiRemoveRequest) (*rrdb.MultiRemoveResponse, error) { - args := &rrdb.RrdbMultiRemoveArgs{Request: request} - result, err := rs.CallWithGpid(ctx, gpid, args, "RPC_RRDB_RRDB_MULTI_REMOVE") - if err != nil { - return nil, err - } - - ret, _ := result.(*rrdb.RrdbMultiRemoveResult) - return ret.GetSuccess(), nil -} - -func (rs *ReplicaSession) TTL(ctx context.Context, gpid *base.Gpid, key *base.Blob) (*rrdb.TTLResponse, error) { - args := &rrdb.RrdbTTLArgs{Key: key} - result, err := rs.CallWithGpid(ctx, gpid, args, "RPC_RRDB_RRDB_TTL") - if err != nil { - return nil, err - } - - ret, _ := result.(*rrdb.RrdbTTLResult) - return ret.GetSuccess(), nil -} - -func (rs *ReplicaSession) GetScanner(ctx context.Context, gpid *base.Gpid, request *rrdb.GetScannerRequest) (*rrdb.ScanResponse, error) { - args := &rrdb.RrdbGetScannerArgs{Request: request} - result, err := rs.CallWithGpid(ctx, gpid, args, "RPC_RRDB_RRDB_GET_SCANNER") - if err != nil { - return nil, err - } - - ret, _ := result.(*rrdb.RrdbGetScannerResult) - return ret.GetSuccess(), nil -} - -func (rs *ReplicaSession) Scan(ctx context.Context, gpid *base.Gpid, request *rrdb.ScanRequest) (*rrdb.ScanResponse, error) { - args := &rrdb.RrdbScanArgs{Request: request} - result, err := rs.CallWithGpid(ctx, gpid, args, "RPC_RRDB_RRDB_SCAN") - if err != nil { - return nil, err - } - - ret, _ := result.(*rrdb.RrdbScanResult) - return ret.GetSuccess(), nil -} - -func (rs *ReplicaSession) ClearScanner(ctx context.Context, gpid *base.Gpid, contextId int64) error { - args := 
&rrdb.RrdbClearScannerArgs{ContextID: contextId} - _, err := rs.CallWithGpid(ctx, gpid, args, "RPC_RRDB_RRDB_CLEAR_SCANNER") - if err != nil { - return err - } - - return nil -} - -func (rs *ReplicaSession) CheckAndSet(ctx context.Context, gpid *base.Gpid, request *rrdb.CheckAndSetRequest) (*rrdb.CheckAndSetResponse, error) { - args := &rrdb.RrdbCheckAndSetArgs{Request: request} - result, err := rs.CallWithGpid(ctx, gpid, args, "RPC_RRDB_RRDB_CHECK_AND_SET") - if err != nil { - return nil, err - } - - ret, _ := result.(*rrdb.RrdbCheckAndSetResult) - return ret.GetSuccess(), nil -} - -func (rs *ReplicaSession) SortKeyCount(ctx context.Context, gpid *base.Gpid, hashKey *base.Blob) (*rrdb.CountResponse, error) { - args := &rrdb.RrdbSortkeyCountArgs{HashKey: hashKey} - result, err := rs.CallWithGpid(ctx, gpid, args, "RPC_RRDB_RRDB_SORTKEY_COUNT") - if err != nil { - return nil, err - } - - ret, _ := result.(*rrdb.RrdbSortkeyCountResult) - return ret.GetSuccess(), nil -} - -func (rs *ReplicaSession) Incr(ctx context.Context, gpid *base.Gpid, request *rrdb.IncrRequest) (*rrdb.IncrResponse, error) { - args := &rrdb.RrdbIncrArgs{Request: request} - result, err := rs.CallWithGpid(ctx, gpid, args, "RPC_RRDB_RRDB_INCR") - if err != nil { - return nil, err - } - - ret, _ := result.(*rrdb.RrdbIncrResult) - return ret.GetSuccess(), nil -} - -// ReplicaManager manages the pool of sessions to replica servers, so that -// different tables that locate on the same replica server can share one -// ReplicaSession, without the effort of creating a new connection. -type ReplicaManager struct { - // rpc address -> replica - replicas map[string]*ReplicaSession - sync.RWMutex - - creator NodeSessionCreator - - unresponsiveHandler UnresponsiveHandler -} - -// UnresponsiveHandler is a callback executed when the session is in unresponsive state. -type UnresponsiveHandler func(NodeSession) - -// SetUnresponsiveHandler inits the UnresponsiveHandler. -func (rm *ReplicaManager) SetUnresponsiveHandler(handler UnresponsiveHandler) { - rm.unresponsiveHandler = handler -} - -// Create a new session to the replica server if no existing one. -func (rm *ReplicaManager) GetReplica(addr string) *ReplicaSession { - rm.Lock() - defer rm.Unlock() - - if _, ok := rm.replicas[addr]; !ok { - r := &ReplicaSession{ - NodeSession: rm.creator(addr, NodeTypeReplica), - } - withUnresponsiveHandler(r.NodeSession, rm.unresponsiveHandler) - rm.replicas[addr] = r - } - return rm.replicas[addr] -} - -func NewReplicaManager(creator NodeSessionCreator) *ReplicaManager { - return &ReplicaManager{ - replicas: make(map[string]*ReplicaSession), - creator: creator, - } -} - -func (rm *ReplicaManager) Close() error { - rm.Lock() - defer rm.Unlock() - - funcs := make([]func() error, 0, len(rm.replicas)) - for _, r := range rm.replicas { - rep := r - funcs = append(funcs, func() error { - return rep.Close() - }) - } - return kerrors.AggregateGoroutines(funcs...) -} - -func (rm *ReplicaManager) ReplicaCount() int { - rm.RLock() - defer rm.RUnlock() - - return len(rm.replicas) -} diff --git a/vendor/github.com/XiaoMi/pegasus-go-client/session/session.go b/vendor/github.com/XiaoMi/pegasus-go-client/session/session.go deleted file mode 100644 index 4da5581..0000000 --- a/vendor/github.com/XiaoMi/pegasus-go-client/session/session.go +++ /dev/null @@ -1,396 +0,0 @@ -// Copyright (c) 2017, Xiaomi, Inc. All rights reserved. 
-// This source code is licensed under the Apache License Version 2.0, which -// can be found in the LICENSE file in the root directory of this source tree. - -package session - -import ( - "context" - "fmt" - "math" - "sync" - "sync/atomic" - "time" - - "github.com/XiaoMi/pegasus-go-client/idl/base" - "github.com/XiaoMi/pegasus-go-client/pegalog" - "github.com/XiaoMi/pegasus-go-client/rpc" - "gopkg.in/tomb.v2" -) - -// NodeType represents the type of the NodeSession. -type NodeType string - -const ( - // NodeTypeMeta indicates it's a session to MetaServer. - NodeTypeMeta NodeType = "meta" - - // NodeTypeReplica indicates it's a session to ReplicaServer. - NodeTypeReplica NodeType = "replica" - - kDialInterval = time.Second * 60 - - // LatencyTracingThreshold means RPC's latency higher than the threshold (1000ms) will be traced - LatencyTracingThreshold = time.Millisecond * 1000 -) - -// NodeSession represents the network session to a node -// (either a meta server or a replica server). -// It encapsulates the internal rpc processing, including -// network communication and message (de)serialization. -type NodeSession interface { - String() string - - // Invoke an rpc call. - CallWithGpid(ctx context.Context, gpid *base.Gpid, args RpcRequestArgs, name string) (result RpcResponseResult, err error) - - // Get connection state. - ConnState() rpc.ConnState - - Close() error -} - -// NodeSessionCreator creates an instance of NodeSession, -// receiving argument `string` as host address, `NodeType` -// as the type of the node. -type NodeSessionCreator func(string, NodeType) NodeSession - -// An implementation of NodeSession. -type nodeSession struct { - logger pegalog.Logger - - // atomic incremented counter that ensures each rpc - // has a unique sequence id - seqId int32 - - addr string - ntype NodeType - conn *rpc.RpcConn - - tom *tomb.Tomb - - reqc chan *requestListener - pendingResp map[int32]*requestListener - mu sync.Mutex - - redialc chan bool - lastDialTime time.Time - - codec rpc.Codec - - unresponsiveHandler UnresponsiveHandler - lastWriteTime int64 -} - -// withUnresponsiveHandler enables the session to handle the event when a network connection becomes unresponsive. -func withUnresponsiveHandler(s NodeSession, handler UnresponsiveHandler) { - ns, ok := s.(*nodeSession) - if !ok { - return - } - ns.unresponsiveHandler = handler -} - -type requestListener struct { - ch chan bool - call *PegasusRpcCall -} - -func newNodeSessionAddr(addr string, ntype NodeType) *nodeSession { - return &nodeSession{ - logger: pegalog.GetLogger(), - ntype: ntype, - seqId: 0, - codec: NewPegasusCodec(), - pendingResp: make(map[int32]*requestListener), - reqc: make(chan *requestListener), - addr: addr, - tom: &tomb.Tomb{}, - - // - redialc: make(chan bool, 1), - } -} - -// NewNodeSession always returns a non-nil value even when the -// connection attempt failed. -// Each nodeSession corresponds to an RpcConn. 
-func NewNodeSession(addr string, ntype NodeType) NodeSession { - return newNodeSession(addr, ntype) -} - -func newNodeSession(addr string, ntype NodeType) *nodeSession { - logger := pegalog.GetLogger() - - n := newNodeSessionAddr(addr, ntype) - logger.Printf("create session with %s", n) - - n.conn = rpc.NewRpcConn(addr) - - n.tom.Go(n.loopForDialing) - return n -} - -// thread-safe -func (n *nodeSession) ConnState() rpc.ConnState { - return n.conn.GetState() -} - -func (n *nodeSession) String() string { - return fmt.Sprintf("[%s(%s)]", n.addr, n.ntype) -} - -// Loop in background and keep watching for redialc. -// Since loopForDialing is the only consumer of redialc, it guarantees -// only 1 goroutine dialing simultaneously. -// This goroutine will not be killed due to io failure, unless the session -// is manually closed. -func (n *nodeSession) loopForDialing() error { // no error returned actually - for { - select { - case <-n.tom.Dying(): - return nil - case <-n.redialc: - if n.ConnState() != rpc.ConnStateReady { - n.dial() - } - } - } -} - -func (n *nodeSession) tryDial() { - select { - case n.redialc <- true: - default: - } -} - -// If the dialing ended successfully, it will start loopForRequest and -// loopForResponse which handle the data communications. -// If the last attempt failed, it will retry again. -func (n *nodeSession) dial() { - if time.Now().Sub(n.lastDialTime) < kDialInterval { - select { - case <-time.After(kDialInterval): - case <-n.tom.Dying(): - return - } - } - - select { - case <-n.tom.Dying(): - // ended if session closed. - default: - n.logger.Print("dial to ", n) - err := n.conn.TryConnect() - n.lastDialTime = time.Now() - - if err != nil { - n.logger.Printf("failed to dial %s: %s", n, err) - } else { - n.tom.Go(n.loopForRequest) - n.tom.Go(n.loopForResponse) - } - } - - n.logger.Printf("stop dialing for %s, connection state: %s", n, n.ConnState()) -} - -func (n *nodeSession) notifyCallerAndDrop(req *requestListener) { - select { - // notify the caller - case req.ch <- true: - n.mu.Lock() - delete(n.pendingResp, req.call.SeqId) - n.mu.Unlock() - default: - panic("impossible for concurrent notifiers") - } -} - -// single-routine worker used for sending requests. -// Any error occurred will end up this goroutine as well as the connection. -func (n *nodeSession) loopForRequest() error { // no error returned actually - for { - select { - case <-n.tom.Dying(): - return nil - case req := <-n.reqc: - n.mu.Lock() - n.pendingResp[req.call.SeqId] = req - n.mu.Unlock() - - atomic.StoreInt64(&n.lastWriteTime, time.Now().UnixNano()) - req.call.OnRpcSend = time.Now() - if err := n.writeRequest(req.call); err != nil { - n.logger.Printf("failed to send request to %s: %s", n, err) - - // notify the rpc caller. - req.call.Err = err - n.notifyCallerAndDrop(req) - - // don give up if there's still hope - if !rpc.IsNetworkTimeoutErr(err) { - return nil - } - } - } - } -} - -// hasRecentUnresponsiveWrite returns if session is active in sending tcp request but gets no response. -func (n *nodeSession) hasRecentUnresponsiveWrite() bool { - // 10s is usually the max limit that the server promises to respond. - var unresponsiveThreshold = int64(math.Max(float64(rpc.ConnReadTimeout.Nanoseconds()/2), float64(time.Second.Nanoseconds()*10))) - return time.Now().UnixNano()-atomic.LoadInt64(&n.lastWriteTime) < unresponsiveThreshold -} - -// single-routine worker used for reading response. 
-// We register a map of sequence id -> recvItem for each coming request, -// so that when a response is received, we are able to notify its caller. -// Any un-retryable error occurred will end up this goroutine. -func (n *nodeSession) loopForResponse() error { // no error returned actually - for { - select { - case <-n.tom.Dying(): - return nil - default: - } - - call, err := n.readResponse() - if err != nil { - if rpc.IsNetworkTimeoutErr(err) { - // If a session encounters a read-timeout, and it's simultaneously writing (depends on lastWriteTime), - // this sesion is considered as unresponsive. - // When in this state, it's in very danger with potential network failure. - if n.unresponsiveHandler != nil && n.hasRecentUnresponsiveWrite() { - n.unresponsiveHandler(n) - } - continue // retry if no data to read - } - if rpc.IsNetworkClosed(err) { // EOF - n.logger.Printf("session %s is closed by the peer", n) - return nil - } - n.logger.Printf("failed to read response from %s: %s", n, err) - return nil - } - call.OnRpcRecv = time.Now() - - n.mu.Lock() - reqListener, ok := n.pendingResp[call.SeqId] - n.mu.Unlock() - - if !ok { - n.logger.Printf("ignore stale response (seqId: %d) from %s: %s", - call.SeqId, n, call.Result) - continue - } - - reqListener.call.Err = call.Err - reqListener.call.Result = call.Result - reqListener.call.OnRpcRecv = call.OnRpcRecv - n.notifyCallerAndDrop(reqListener) - } -} - -func (n *nodeSession) waitUntilSessionReady(ctx context.Context) error { - if n.ConnState() != rpc.ConnStateReady { - dialStart := time.Now() - - n.tryDial() - - var ready bool - ticker := time.NewTicker(1 * time.Millisecond) // polling 1ms each time to minimize the connection time. - defer ticker.Stop() - for { - breakLoop := false - select { - case <-ctx.Done(): // exceeds the user timeout, or this context is cancelled, or the session transiently failed. - breakLoop = true - case <-ticker.C: - if n.ConnState() == rpc.ConnStateReady { - ready = true - breakLoop = true - } - } - if breakLoop { - break - } - } - - if !ready { - return fmt.Errorf("session %s is unable to connect (used %dms), the context error: %s", n, time.Since(dialStart)/time.Millisecond, ctx.Err()) - } - } - return nil -} - -func (n *nodeSession) CallWithGpid(ctx context.Context, gpid *base.Gpid, args RpcRequestArgs, name string) (result RpcResponseResult, err error) { - // either the ctx cancelled or the tomb killed will stop this rpc call. 
- ctxWithTomb := n.tom.Context(ctx) - if err := n.waitUntilSessionReady(ctxWithTomb); err != nil { - return nil, err - } - - seqId := atomic.AddInt32(&n.seqId, 1) // increment sequence id - rcall, err := MarshallPegasusRpc(n.codec, seqId, gpid, args, name) - if err != nil { - return nil, err - } - rcall.OnRpcCall = time.Now() - - req := &requestListener{call: rcall, ch: make(chan bool, 1)} - - defer func() { - // manually trigger gc - rcall = nil - req = nil - }() - - select { - // passes the request to loopForRequest - case n.reqc <- req: - select { - // receive from loopForResponse, or loopRequest failed - case <-req.ch: - err = rcall.Err - result = rcall.Result - if rcall.TilNow() > LatencyTracingThreshold { - n.logger.Printf("[%s(%s)] trace to %s: %s", rcall.Name, rcall.Gpid, n, rcall.Trace()) - } - return - case <-ctxWithTomb.Done(): - err = ctxWithTomb.Err() - result = nil - return - } - case <-ctxWithTomb.Done(): - err = ctxWithTomb.Err() - result = nil - return - } -} - -func (n *nodeSession) writeRequest(r *PegasusRpcCall) error { - return n.conn.Write(r.RawReq) -} - -// readResponse never returns nil `PegasusRpcCall` unless the tcp round trip failed. -// The pegasus server may in some cases respond with a not-ERR_OK error code (together with -// sequence id and rpc name) while without a transport-layer failure. -func (n *nodeSession) readResponse() (*PegasusRpcCall, error) { - return ReadRpcResponse(n.conn, n.codec) -} - -func (n *nodeSession) Close() error { - n.mu.Lock() - if n.ConnState() != rpc.ConnStateClosed { - n.logger.Printf("close session %s", n) - n.conn.Close() - n.tom.Kill(nil) - } - n.mu.Unlock() - - return n.tom.Wait() -} diff --git a/vendor/github.com/allegro/bigcache/v3/.codecov.yml b/vendor/github.com/allegro/bigcache/v3/.codecov.yml deleted file mode 100644 index d8c862e..0000000 --- a/vendor/github.com/allegro/bigcache/v3/.codecov.yml +++ /dev/null @@ -1,28 +0,0 @@ ---- -codecov: - require_ci_to_pass: true -comment: - behavior: default - layout: reach, diff, flags, files, footer - require_base: false - require_changes: false - require_head: true -coverage: - precision: 2 - range: - - 70 - - 100 - round: down - status: - changes: false - patch: true - project: true -parsers: - gcov: - branch_detection: - conditional: true - loop: true - macro: false - method: false - javascript: - enable_partials: false diff --git a/vendor/github.com/allegro/bigcache/v3/.gitignore b/vendor/github.com/allegro/bigcache/v3/.gitignore deleted file mode 100644 index 7886915..0000000 --- a/vendor/github.com/allegro/bigcache/v3/.gitignore +++ /dev/null @@ -1,11 +0,0 @@ -.idea -.DS_Store -/server/server.exe -/server/server -/server/server_dar* -/server/server_fre* -/server/server_win* -/server/server_net* -/server/server_ope* -/server/server_lin* -CHANGELOG.md diff --git a/vendor/github.com/allegro/bigcache/v3/LICENSE b/vendor/github.com/allegro/bigcache/v3/LICENSE deleted file mode 100644 index 8dada3e..0000000 --- a/vendor/github.com/allegro/bigcache/v3/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. 
- - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/github.com/allegro/bigcache/v3/README.md b/vendor/github.com/allegro/bigcache/v3/README.md deleted file mode 100644 index 253749b..0000000 --- a/vendor/github.com/allegro/bigcache/v3/README.md +++ /dev/null @@ -1,204 +0,0 @@ -# BigCache [![Build Status](https://github.com/allegro/bigcache/workflows/build/badge.svg)](https://github.com/allegro/bigcache/actions?query=workflow%3Abuild) [![Coverage Status](https://coveralls.io/repos/github/allegro/bigcache/badge.svg?branch=master)](https://coveralls.io/github/allegro/bigcache?branch=master) [![GoDoc](https://godoc.org/github.com/allegro/bigcache/v3?status.svg)](https://godoc.org/github.com/allegro/bigcache/v3) [![Go Report Card](https://goreportcard.com/badge/github.com/allegro/bigcache/v3)](https://goreportcard.com/report/github.com/allegro/bigcache/v3) - -Fast, concurrent, evicting in-memory cache written to keep big number of entries without impact on performance. -BigCache keeps entries on heap but omits GC for them. To achieve that, operations on byte slices take place, -therefore entries (de)serialization in front of the cache will be needed in most use cases. - -Requires Go 1.12 or newer. - -## Usage - -### Simple initialization - -```go -import ( - "fmt" - "context" - "github.com/allegro/bigcache/v3" -) - -cache, _ := bigcache.New(context.Background(), bigcache.DefaultConfig(10 * time.Minute)) - -cache.Set("my-unique-key", []byte("value")) - -entry, _ := cache.Get("my-unique-key") -fmt.Println(string(entry)) -``` - -### Custom initialization - -When cache load can be predicted in advance then it is better to use custom initialization because additional memory -allocation can be avoided in that way. - -```go -import ( - "log" - - "github.com/allegro/bigcache/v3" -) - -config := bigcache.Config { - // number of shards (must be a power of 2) - Shards: 1024, - - // time after which entry can be evicted - LifeWindow: 10 * time.Minute, - - // Interval between removing expired entries (clean up). - // If set to <= 0 then no action is performed. - // Setting to < 1 second is counterproductive — bigcache has a one second resolution. - CleanWindow: 5 * time.Minute, - - // rps * lifeWindow, used only in initial memory allocation - MaxEntriesInWindow: 1000 * 10 * 60, - - // max entry size in bytes, used only in initial memory allocation - MaxEntrySize: 500, - - // prints information about additional memory allocation - Verbose: true, - - // cache will not allocate more memory than this limit, value in MB - // if value is reached then the oldest entries can be overridden for the new ones - // 0 value means no size limit - HardMaxCacheSize: 8192, - - // callback fired when the oldest entry is removed because of its expiration time or no space left - // for the new entry, or because delete was called. A bitmask representing the reason will be returned. - // Default value is nil which means no callback and it prevents from unwrapping the oldest entry. - OnRemove: nil, - - // OnRemoveWithReason is a callback fired when the oldest entry is removed because of its expiration time or no space left - // for the new entry, or because delete was called. A constant representing the reason will be passed through. - // Default value is nil which means no callback and it prevents from unwrapping the oldest entry. - // Ignored if OnRemove is specified. 
- OnRemoveWithReason: nil, - } - -cache, initErr := bigcache.New(context.Background(), config) -if initErr != nil { - log.Fatal(initErr) -} - -cache.Set("my-unique-key", []byte("value")) - -if entry, err := cache.Get("my-unique-key"); err == nil { - fmt.Println(string(entry)) -} -``` - -### `LifeWindow` & `CleanWindow` - -1. `LifeWindow` is a time. After that time, an entry can be called dead but not deleted. - -2. `CleanWindow` is a time. After that time, all the dead entries will be deleted, but not the entries that still have life. - -## [Benchmarks](https://github.com/allegro/bigcache-bench) - -Three caches were compared: bigcache, [freecache](https://github.com/coocood/freecache) and map. -Benchmark tests were made using an -i7-6700K CPU @ 4.00GHz with 32GB of RAM on Ubuntu 18.04 LTS (5.2.12-050212-generic). - -Benchmarks source code can be found [here](https://github.com/allegro/bigcache-bench) - -### Writes and reads - -```bash -go version -go version go1.13 linux/amd64 - -go test -bench=. -benchmem -benchtime=4s ./... -timeout 30m -goos: linux -goarch: amd64 -pkg: github.com/allegro/bigcache/v3/caches_bench -BenchmarkMapSet-8 12999889 376 ns/op 199 B/op 3 allocs/op -BenchmarkConcurrentMapSet-8 4355726 1275 ns/op 337 B/op 8 allocs/op -BenchmarkFreeCacheSet-8 11068976 703 ns/op 328 B/op 2 allocs/op -BenchmarkBigCacheSet-8 10183717 478 ns/op 304 B/op 2 allocs/op -BenchmarkMapGet-8 16536015 324 ns/op 23 B/op 1 allocs/op -BenchmarkConcurrentMapGet-8 13165708 401 ns/op 24 B/op 2 allocs/op -BenchmarkFreeCacheGet-8 10137682 690 ns/op 136 B/op 2 allocs/op -BenchmarkBigCacheGet-8 11423854 450 ns/op 152 B/op 4 allocs/op -BenchmarkBigCacheSetParallel-8 34233472 148 ns/op 317 B/op 3 allocs/op -BenchmarkFreeCacheSetParallel-8 34222654 268 ns/op 350 B/op 3 allocs/op -BenchmarkConcurrentMapSetParallel-8 19635688 240 ns/op 200 B/op 6 allocs/op -BenchmarkBigCacheGetParallel-8 60547064 86.1 ns/op 152 B/op 4 allocs/op -BenchmarkFreeCacheGetParallel-8 50701280 147 ns/op 136 B/op 3 allocs/op -BenchmarkConcurrentMapGetParallel-8 27353288 175 ns/op 24 B/op 2 allocs/op -PASS -ok github.com/allegro/bigcache/v3/caches_bench 256.257s -``` - -Writes and reads in bigcache are faster than in freecache. -Writes to map are the slowest. - -### GC pause time - -```bash -go version -go version go1.13 linux/amd64 - -go run caches_gc_overhead_comparison.go - -Number of entries: 20000000 -GC pause for bigcache: 1.506077ms -GC pause for freecache: 5.594416ms -GC pause for map: 9.347015ms -``` - -``` -go version -go version go1.13 linux/arm64 - -go run caches_gc_overhead_comparison.go -Number of entries: 20000000 -GC pause for bigcache: 22.382827ms -GC pause for freecache: 41.264651ms -GC pause for map: 72.236853ms -``` - -Test shows how long are the GC pauses for caches filled with 20mln of entries. -Bigcache and freecache have very similar GC pause time. - -### Memory usage - -You may encounter system memory reporting what appears to be an exponential increase, however this is expected behaviour. Go runtime allocates memory in chunks or 'spans' and will inform the OS when they are no longer required by changing their state to 'idle'. The 'spans' will remain part of the process resource usage until the OS needs to repurpose the address. Further reading available [here](https://utcc.utoronto.ca/~cks/space/blog/programming/GoNoMemoryFreeing). - -## How it works - -BigCache relies on optimization presented in 1.5 version of Go ([issue-9477](https://github.com/golang/go/issues/9477)). 
-This optimization states that if map without pointers in keys and values is used then GC will omit its content. -Therefore BigCache uses `map[uint64]uint32` where keys are hashed and values are offsets of entries. - -Entries are kept in byte slices, to omit GC again. -Byte slices size can grow to gigabytes without impact on performance -because GC will only see single pointer to it. - -### Collisions - -BigCache does not handle collisions. When new item is inserted and it's hash collides with previously stored item, new item overwrites previously stored value. - -## Bigcache vs Freecache - -Both caches provide the same core features but they reduce GC overhead in different ways. -Bigcache relies on `map[uint64]uint32`, freecache implements its own mapping built on -slices to reduce number of pointers. - -Results from benchmark tests are presented above. -One of the advantage of bigcache over freecache is that you don’t need to know -the size of the cache in advance, because when bigcache is full, -it can allocate additional memory for new entries instead of -overwriting existing ones as freecache does currently. -However hard max size in bigcache also can be set, check [HardMaxCacheSize](https://godoc.org/github.com/allegro/bigcache#Config). - -## HTTP Server - -This package also includes an easily deployable HTTP implementation of BigCache, which can be found in the [server](/server) package. - -## More - -Bigcache genesis is described in allegro.tech blog post: [writing a very fast cache service in Go](http://allegro.tech/2016/03/writing-fast-cache-service-in-go.html) - -## License - -BigCache is released under the Apache 2.0 license (see [LICENSE](LICENSE)) diff --git a/vendor/github.com/allegro/bigcache/v3/bigcache.go b/vendor/github.com/allegro/bigcache/v3/bigcache.go deleted file mode 100644 index 17e2aca..0000000 --- a/vendor/github.com/allegro/bigcache/v3/bigcache.go +++ /dev/null @@ -1,270 +0,0 @@ -package bigcache - -import ( - "context" - "fmt" - "time" -) - -const ( - minimumEntriesInShard = 10 // Minimum number of entries in single shard -) - -// BigCache is fast, concurrent, evicting cache created to keep big number of entries without impact on performance. -// It keeps entries on heap but omits GC for them. To achieve that, operations take place on byte arrays, -// therefore entries (de)serialization in front of the cache will be needed in most use cases. -type BigCache struct { - shards []*cacheShard - lifeWindow uint64 - clock clock - hash Hasher - config Config - shardMask uint64 - close chan struct{} -} - -// Response will contain metadata about the entry for which GetWithInfo(key) was called -type Response struct { - EntryStatus RemoveReason -} - -// RemoveReason is a value used to signal to the user why a particular key was removed in the OnRemove callback. -type RemoveReason uint32 - -const ( - // Expired means the key is past its LifeWindow. - Expired = RemoveReason(1) - // NoSpace means the key is the oldest and the cache size was at its maximum when Set was called, or the - // entry exceeded the maximum shard size. - NoSpace = RemoveReason(2) - // Deleted means Delete was called and this key was removed as a result. 
- Deleted = RemoveReason(3) -) - -// New initialize new instance of BigCache -func New(ctx context.Context, config Config) (*BigCache, error) { - return newBigCache(ctx, config, &systemClock{}) -} - -// NewBigCache initialize new instance of BigCache -// -// Deprecated: NewBigCache is deprecated, please use New(ctx, config) instead, -// New takes in context and can gracefully -// shutdown with context cancellations -func NewBigCache(config Config) (*BigCache, error) { - return newBigCache(context.Background(), config, &systemClock{}) -} - -func newBigCache(ctx context.Context, config Config, clock clock) (*BigCache, error) { - if !isPowerOfTwo(config.Shards) { - return nil, fmt.Errorf("Shards number must be power of two") - } - if config.MaxEntrySize < 0 { - return nil, fmt.Errorf("MaxEntrySize must be >= 0") - } - if config.MaxEntriesInWindow < 0 { - return nil, fmt.Errorf("MaxEntriesInWindow must be >= 0") - } - if config.HardMaxCacheSize < 0 { - return nil, fmt.Errorf("HardMaxCacheSize must be >= 0") - } - - if config.Hasher == nil { - config.Hasher = newDefaultHasher() - } - - cache := &BigCache{ - shards: make([]*cacheShard, config.Shards), - lifeWindow: uint64(config.LifeWindow.Seconds()), - clock: clock, - hash: config.Hasher, - config: config, - shardMask: uint64(config.Shards - 1), - close: make(chan struct{}), - } - - var onRemove func(wrappedEntry []byte, reason RemoveReason) - if config.OnRemoveWithMetadata != nil { - onRemove = cache.providedOnRemoveWithMetadata - } else if config.OnRemove != nil { - onRemove = cache.providedOnRemove - } else if config.OnRemoveWithReason != nil { - onRemove = cache.providedOnRemoveWithReason - } else { - onRemove = cache.notProvidedOnRemove - } - - for i := 0; i < config.Shards; i++ { - cache.shards[i] = initNewShard(config, onRemove, clock) - } - - if config.CleanWindow > 0 { - go func() { - ticker := time.NewTicker(config.CleanWindow) - defer ticker.Stop() - for { - select { - case <-ctx.Done(): - fmt.Println("ctx done, shutting down bigcache cleanup routine") - return - case t := <-ticker.C: - cache.cleanUp(uint64(t.Unix())) - case <-cache.close: - return - } - } - }() - } - - return cache, nil -} - -// Close is used to signal a shutdown of the cache when you are done with it. -// This allows the cleaning goroutines to exit and ensures references are not -// kept to the cache preventing GC of the entire cache. -func (c *BigCache) Close() error { - close(c.close) - return nil -} - -// Get reads entry for the key. -// It returns an ErrEntryNotFound when -// no entry exists for the given key. -func (c *BigCache) Get(key string) ([]byte, error) { - hashedKey := c.hash.Sum64(key) - shard := c.getShard(hashedKey) - return shard.get(key, hashedKey) -} - -// GetWithInfo reads entry for the key with Response info. -// It returns an ErrEntryNotFound when -// no entry exists for the given key. -func (c *BigCache) GetWithInfo(key string) ([]byte, Response, error) { - hashedKey := c.hash.Sum64(key) - shard := c.getShard(hashedKey) - return shard.getWithInfo(key, hashedKey) -} - -// Set saves entry under the key -func (c *BigCache) Set(key string, entry []byte) error { - hashedKey := c.hash.Sum64(key) - shard := c.getShard(hashedKey) - return shard.set(key, hashedKey, entry) -} - -// Append appends entry under the key if key exists, otherwise -// it will set the key (same behaviour as Set()). With Append() you can -// concatenate multiple entries under the same key in an lock optimized way. 
-func (c *BigCache) Append(key string, entry []byte) error { - hashedKey := c.hash.Sum64(key) - shard := c.getShard(hashedKey) - return shard.append(key, hashedKey, entry) -} - -// Delete removes the key -func (c *BigCache) Delete(key string) error { - hashedKey := c.hash.Sum64(key) - shard := c.getShard(hashedKey) - return shard.del(hashedKey) -} - -// Reset empties all cache shards -func (c *BigCache) Reset() error { - for _, shard := range c.shards { - shard.reset(c.config) - } - return nil -} - -// ResetStats resets cache stats -func (c *BigCache) ResetStats() error { - for _, shard := range c.shards { - shard.resetStats() - } - return nil -} - -// Len computes number of entries in cache -func (c *BigCache) Len() int { - var len int - for _, shard := range c.shards { - len += shard.len() - } - return len -} - -// Capacity returns amount of bytes store in the cache. -func (c *BigCache) Capacity() int { - var len int - for _, shard := range c.shards { - len += shard.capacity() - } - return len -} - -// Stats returns cache's statistics -func (c *BigCache) Stats() Stats { - var s Stats - for _, shard := range c.shards { - tmp := shard.getStats() - s.Hits += tmp.Hits - s.Misses += tmp.Misses - s.DelHits += tmp.DelHits - s.DelMisses += tmp.DelMisses - s.Collisions += tmp.Collisions - } - return s -} - -// KeyMetadata returns number of times a cached resource was requested. -func (c *BigCache) KeyMetadata(key string) Metadata { - hashedKey := c.hash.Sum64(key) - shard := c.getShard(hashedKey) - return shard.getKeyMetadataWithLock(hashedKey) -} - -// Iterator returns iterator function to iterate over EntryInfo's from whole cache. -func (c *BigCache) Iterator() *EntryInfoIterator { - return newIterator(c) -} - -func (c *BigCache) onEvict(oldestEntry []byte, currentTimestamp uint64, evict func(reason RemoveReason) error) bool { - oldestTimestamp := readTimestampFromEntry(oldestEntry) - if currentTimestamp < oldestTimestamp { - return false - } - if currentTimestamp-oldestTimestamp > c.lifeWindow { - evict(Expired) - return true - } - return false -} - -func (c *BigCache) cleanUp(currentTimestamp uint64) { - for _, shard := range c.shards { - shard.cleanUp(currentTimestamp) - } -} - -func (c *BigCache) getShard(hashedKey uint64) (shard *cacheShard) { - return c.shards[hashedKey&c.shardMask] -} - -func (c *BigCache) providedOnRemove(wrappedEntry []byte, reason RemoveReason) { - c.config.OnRemove(readKeyFromEntry(wrappedEntry), readEntry(wrappedEntry)) -} - -func (c *BigCache) providedOnRemoveWithReason(wrappedEntry []byte, reason RemoveReason) { - if c.config.onRemoveFilter == 0 || (1<<uint(reason))&c.config.onRemoveFilter > 0 { - c.config.OnRemoveWithReason(readKeyFromEntry(wrappedEntry), readEntry(wrappedEntry), reason) - } -} - -func (c *BigCache) notProvidedOnRemove(wrappedEntry []byte, reason RemoveReason) { -} - -func (c *BigCache) providedOnRemoveWithMetadata(wrappedEntry []byte, reason RemoveReason) { - hashedKey := c.hash.Sum64(readKeyFromEntry(wrappedEntry)) - shard := c.getShard(hashedKey) - c.config.OnRemoveWithMetadata(readKeyFromEntry(wrappedEntry), readEntry(wrappedEntry), shard.getKeyMetadata(hashedKey)) -} diff --git a/vendor/github.com/allegro/bigcache/v3/bytes.go b/vendor/github.com/allegro/bigcache/v3/bytes.go deleted file mode 100644 index 2bf2e9a..0000000 --- a/vendor/github.com/allegro/bigcache/v3/bytes.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build !appengine - -package bigcache - -import ( - "unsafe" -) - -func bytesToString(b []byte) string { - return *(*string)(unsafe.Pointer(&b)) -} diff --git 
a/vendor/github.com/allegro/bigcache/v3/bytes_appengine.go b/vendor/github.com/allegro/bigcache/v3/bytes_appengine.go deleted file mode 100644 index 3892f3b..0000000 --- a/vendor/github.com/allegro/bigcache/v3/bytes_appengine.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build appengine - -package bigcache - -func bytesToString(b []byte) string { - return string(b) -} diff --git a/vendor/github.com/allegro/bigcache/v3/clock.go b/vendor/github.com/allegro/bigcache/v3/clock.go deleted file mode 100644 index 195d01a..0000000 --- a/vendor/github.com/allegro/bigcache/v3/clock.go +++ /dev/null @@ -1,14 +0,0 @@ -package bigcache - -import "time" - -type clock interface { - Epoch() int64 -} - -type systemClock struct { -} - -func (c systemClock) Epoch() int64 { - return time.Now().Unix() -} diff --git a/vendor/github.com/allegro/bigcache/v3/config.go b/vendor/github.com/allegro/bigcache/v3/config.go deleted file mode 100644 index 63a4e9b..0000000 --- a/vendor/github.com/allegro/bigcache/v3/config.go +++ /dev/null @@ -1,97 +0,0 @@ -package bigcache - -import "time" - -// Config for BigCache -type Config struct { - // Number of cache shards, value must be a power of two - Shards int - // Time after which entry can be evicted - LifeWindow time.Duration - // Interval between removing expired entries (clean up). - // If set to <= 0 then no action is performed. Setting to < 1 second is counterproductive — bigcache has a one second resolution. - CleanWindow time.Duration - // Max number of entries in life window. Used only to calculate initial size for cache shards. - // When proper value is set then additional memory allocation does not occur. - MaxEntriesInWindow int - // Max size of entry in bytes. Used only to calculate initial size for cache shards. - MaxEntrySize int - // StatsEnabled if true calculate the number of times a cached resource was requested. - StatsEnabled bool - // Verbose mode prints information about new memory allocation - Verbose bool - // Hasher used to map between string keys and unsigned 64bit integers, by default fnv64 hashing is used. - Hasher Hasher - // HardMaxCacheSize is a limit for BytesQueue size in MB. - // It can protect application from consuming all available memory on machine, therefore from running OOM Killer. - // Default value is 0 which means unlimited size. When the limit is higher than 0 and reached then - // the oldest entries are overridden for the new ones. The max memory consumption will be bigger than - // HardMaxCacheSize due to Shards' s additional memory. Every Shard consumes additional memory for map of keys - // and statistics (map[uint64]uint32) the size of this map is equal to number of entries in - // cache ~ 2×(64+32)×n bits + overhead or map itself. - HardMaxCacheSize int - // OnRemove is a callback fired when the oldest entry is removed because of its expiration time or no space left - // for the new entry, or because delete was called. - // Default value is nil which means no callback and it prevents from unwrapping the oldest entry. - // ignored if OnRemoveWithMetadata is specified. - OnRemove func(key string, entry []byte) - // OnRemoveWithMetadata is a callback fired when the oldest entry is removed because of its expiration time or no space left - // for the new entry, or because delete was called. A structure representing details about that specific entry. - // Default value is nil which means no callback and it prevents from unwrapping the oldest entry. 
- OnRemoveWithMetadata func(key string, entry []byte, keyMetadata Metadata) - // OnRemoveWithReason is a callback fired when the oldest entry is removed because of its expiration time or no space left - // for the new entry, or because delete was called. A constant representing the reason will be passed through. - // Default value is nil which means no callback and it prevents from unwrapping the oldest entry. - // Ignored if OnRemove is specified. - OnRemoveWithReason func(key string, entry []byte, reason RemoveReason) - - onRemoveFilter int - - // Logger is a logging interface and used in combination with `Verbose` - // Defaults to `DefaultLogger()` - Logger Logger -} - -// DefaultConfig initializes config with default values. -// When load for BigCache can be predicted in advance then it is better to use custom config. -func DefaultConfig(eviction time.Duration) Config { - return Config{ - Shards: 1024, - LifeWindow: eviction, - CleanWindow: 1 * time.Second, - MaxEntriesInWindow: 1000 * 10 * 60, - MaxEntrySize: 500, - StatsEnabled: false, - Verbose: true, - Hasher: newDefaultHasher(), - HardMaxCacheSize: 0, - Logger: DefaultLogger(), - } -} - -// initialShardSize computes initial shard size -func (c Config) initialShardSize() int { - return max(c.MaxEntriesInWindow/c.Shards, minimumEntriesInShard) -} - -// maximumShardSizeInBytes computes maximum shard size in bytes -func (c Config) maximumShardSizeInBytes() int { - maxShardSize := 0 - - if c.HardMaxCacheSize > 0 { - maxShardSize = convertMBToBytes(c.HardMaxCacheSize) / c.Shards - } - - return maxShardSize -} - -// OnRemoveFilterSet sets which remove reasons will trigger a call to OnRemoveWithReason. -// Filtering out reasons prevents bigcache from unwrapping them, which saves cpu. -func (c Config) OnRemoveFilterSet(reasons ...RemoveReason) Config { - c.onRemoveFilter = 0 - for i := range reasons { - c.onRemoveFilter |= 1 << uint(reasons[i]) - } - - return c -} diff --git a/vendor/github.com/allegro/bigcache/v3/encoding.go b/vendor/github.com/allegro/bigcache/v3/encoding.go deleted file mode 100644 index fc86125..0000000 --- a/vendor/github.com/allegro/bigcache/v3/encoding.go +++ /dev/null @@ -1,83 +0,0 @@ -package bigcache - -import ( - "encoding/binary" -) - -const ( - timestampSizeInBytes = 8 // Number of bytes used for timestamp - hashSizeInBytes = 8 // Number of bytes used for hash - keySizeInBytes = 2 // Number of bytes used for size of entry key - headersSizeInBytes = timestampSizeInBytes + hashSizeInBytes + keySizeInBytes // Number of bytes used for all headers -) - -func wrapEntry(timestamp uint64, hash uint64, key string, entry []byte, buffer *[]byte) []byte { - keyLength := len(key) - blobLength := len(entry) + headersSizeInBytes + keyLength - - if blobLength > len(*buffer) { - *buffer = make([]byte, blobLength) - } - blob := *buffer - - binary.LittleEndian.PutUint64(blob, timestamp) - binary.LittleEndian.PutUint64(blob[timestampSizeInBytes:], hash) - binary.LittleEndian.PutUint16(blob[timestampSizeInBytes+hashSizeInBytes:], uint16(keyLength)) - copy(blob[headersSizeInBytes:], key) - copy(blob[headersSizeInBytes+keyLength:], entry) - - return blob[:blobLength] -} - -func appendToWrappedEntry(timestamp uint64, wrappedEntry []byte, entry []byte, buffer *[]byte) []byte { - blobLength := len(wrappedEntry) + len(entry) - if blobLength > len(*buffer) { - *buffer = make([]byte, blobLength) - } - - blob := *buffer - - binary.LittleEndian.PutUint64(blob, timestamp) - copy(blob[timestampSizeInBytes:], 
wrappedEntry[timestampSizeInBytes:]) - copy(blob[len(wrappedEntry):], entry) - - return blob[:blobLength] -} - -func readEntry(data []byte) []byte { - length := binary.LittleEndian.Uint16(data[timestampSizeInBytes+hashSizeInBytes:]) - - // copy on read - dst := make([]byte, len(data)-int(headersSizeInBytes+length)) - copy(dst, data[headersSizeInBytes+length:]) - - return dst -} - -func readTimestampFromEntry(data []byte) uint64 { - return binary.LittleEndian.Uint64(data) -} - -func readKeyFromEntry(data []byte) string { - length := binary.LittleEndian.Uint16(data[timestampSizeInBytes+hashSizeInBytes:]) - - // copy on read - dst := make([]byte, length) - copy(dst, data[headersSizeInBytes:headersSizeInBytes+length]) - - return bytesToString(dst) -} - -func compareKeyFromEntry(data []byte, key string) bool { - length := binary.LittleEndian.Uint16(data[timestampSizeInBytes+hashSizeInBytes:]) - - return bytesToString(data[headersSizeInBytes:headersSizeInBytes+length]) == key -} - -func readHashFromEntry(data []byte) uint64 { - return binary.LittleEndian.Uint64(data[timestampSizeInBytes:]) -} - -func resetKeyFromEntry(data []byte) { - binary.LittleEndian.PutUint64(data[timestampSizeInBytes:], 0) -} diff --git a/vendor/github.com/allegro/bigcache/v3/entry_not_found_error.go b/vendor/github.com/allegro/bigcache/v3/entry_not_found_error.go deleted file mode 100644 index 8993384..0000000 --- a/vendor/github.com/allegro/bigcache/v3/entry_not_found_error.go +++ /dev/null @@ -1,8 +0,0 @@ -package bigcache - -import "errors" - -var ( - // ErrEntryNotFound is an error type struct which is returned when entry was not found for provided key - ErrEntryNotFound = errors.New("Entry not found") -) diff --git a/vendor/github.com/allegro/bigcache/v3/fnv.go b/vendor/github.com/allegro/bigcache/v3/fnv.go deleted file mode 100644 index 188c9aa..0000000 --- a/vendor/github.com/allegro/bigcache/v3/fnv.go +++ /dev/null @@ -1,28 +0,0 @@ -package bigcache - -// newDefaultHasher returns a new 64-bit FNV-1a Hasher which makes no memory allocations. -// Its Sum64 method will lay the value out in big-endian byte order. -// See https://en.wikipedia.org/wiki/Fowler–Noll–Vo_hash_function -func newDefaultHasher() Hasher { - return fnv64a{} -} - -type fnv64a struct{} - -const ( - // offset64 FNVa offset basis. See https://en.wikipedia.org/wiki/Fowler–Noll–Vo_hash_function#FNV-1a_hash - offset64 = 14695981039346656037 - // prime64 FNVa prime value. See https://en.wikipedia.org/wiki/Fowler–Noll–Vo_hash_function#FNV-1a_hash - prime64 = 1099511628211 -) - -// Sum64 gets the string and returns its uint64 hash value. -func (f fnv64a) Sum64(key string) uint64 { - var hash uint64 = offset64 - for i := 0; i < len(key); i++ { - hash ^= uint64(key[i]) - hash *= prime64 - } - - return hash -} diff --git a/vendor/github.com/allegro/bigcache/v3/hash.go b/vendor/github.com/allegro/bigcache/v3/hash.go deleted file mode 100644 index 5f8ade7..0000000 --- a/vendor/github.com/allegro/bigcache/v3/hash.go +++ /dev/null @@ -1,8 +0,0 @@ -package bigcache - -// Hasher is responsible for generating unsigned, 64 bit hash of provided string. Hasher should minimize collisions -// (generating same hash for different strings) and while performance is also important fast functions are preferable (i.e. -// you can use FarmHash family). 
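The hasher removed above is the piece that decides which shard a key lands in: fnv.go supplies the allocation-free FNV-1a `Sum64`, and `getShard` (earlier in bigcache.go) masks the resulting hash with `shardMask`. As a rough standalone illustration, not part of this diff, the routing works like the sketch below; the key and the 1024-shard count are arbitrary example values.

```go
package main

import "fmt"

const (
	// FNV-1a 64-bit constants, matching the fnv64a hasher shown in fnv.go.
	offset64 = 14695981039346656037
	prime64  = 1099511628211
)

// sum64 mirrors the allocation-free FNV-1a hash used by bigcache's default hasher.
func sum64(key string) uint64 {
	var hash uint64 = offset64
	for i := 0; i < len(key); i++ {
		hash ^= uint64(key[i])
		hash *= prime64
	}
	return hash
}

func main() {
	// With 1024 shards (the DefaultConfig value), the mask is shards-1,
	// so a key is routed with hashedKey & shardMask, as in BigCache.getShard.
	const shards = 1024
	const shardMask = shards - 1

	hashedKey := sum64("user:42")
	fmt.Printf("hash=%x shard=%d\n", hashedKey, hashedKey&shardMask)
}
```

Because the shard count must be a power of two (see the Config comment above and `isPowerOfTwo` in utils.go later in this diff), the `shards-1` mask selects the low bits of the hash directly, avoiding a modulo on every lookup.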
-type Hasher interface { - Sum64(string) uint64 -} diff --git a/vendor/github.com/allegro/bigcache/v3/iterator.go b/vendor/github.com/allegro/bigcache/v3/iterator.go deleted file mode 100644 index db2a2ef..0000000 --- a/vendor/github.com/allegro/bigcache/v3/iterator.go +++ /dev/null @@ -1,146 +0,0 @@ -package bigcache - -import ( - "sync" -) - -type iteratorError string - -func (e iteratorError) Error() string { - return string(e) -} - -// ErrInvalidIteratorState is reported when iterator is in invalid state -const ErrInvalidIteratorState = iteratorError("Iterator is in invalid state. Use SetNext() to move to next position") - -// ErrCannotRetrieveEntry is reported when entry cannot be retrieved from underlying -const ErrCannotRetrieveEntry = iteratorError("Could not retrieve entry from cache") - -var emptyEntryInfo = EntryInfo{} - -// EntryInfo holds informations about entry in the cache -type EntryInfo struct { - timestamp uint64 - hash uint64 - key string - value []byte - err error -} - -// Key returns entry's underlying key -func (e EntryInfo) Key() string { - return e.key -} - -// Hash returns entry's hash value -func (e EntryInfo) Hash() uint64 { - return e.hash -} - -// Timestamp returns entry's timestamp (time of insertion) -func (e EntryInfo) Timestamp() uint64 { - return e.timestamp -} - -// Value returns entry's underlying value -func (e EntryInfo) Value() []byte { - return e.value -} - -// EntryInfoIterator allows to iterate over entries in the cache -type EntryInfoIterator struct { - mutex sync.Mutex - cache *BigCache - currentShard int - currentIndex int - currentEntryInfo EntryInfo - elements []uint64 - elementsCount int - valid bool -} - -// SetNext moves to next element and returns true if it exists. -func (it *EntryInfoIterator) SetNext() bool { - it.mutex.Lock() - - it.valid = false - it.currentIndex++ - - if it.elementsCount > it.currentIndex { - it.valid = true - - empty := it.setCurrentEntry() - it.mutex.Unlock() - - if empty { - return it.SetNext() - } - return true - } - - for i := it.currentShard + 1; i < it.cache.config.Shards; i++ { - it.elements, it.elementsCount = it.cache.shards[i].copyHashedKeys() - - // Non empty shard - stick with it - if it.elementsCount > 0 { - it.currentIndex = 0 - it.currentShard = i - it.valid = true - - empty := it.setCurrentEntry() - it.mutex.Unlock() - - if empty { - return it.SetNext() - } - return true - } - } - it.mutex.Unlock() - return false -} - -func (it *EntryInfoIterator) setCurrentEntry() bool { - var entryNotFound = false - entry, err := it.cache.shards[it.currentShard].getEntry(it.elements[it.currentIndex]) - - if err == ErrEntryNotFound { - it.currentEntryInfo = emptyEntryInfo - entryNotFound = true - } else if err != nil { - it.currentEntryInfo = EntryInfo{ - err: err, - } - } else { - it.currentEntryInfo = EntryInfo{ - timestamp: readTimestampFromEntry(entry), - hash: readHashFromEntry(entry), - key: readKeyFromEntry(entry), - value: readEntry(entry), - err: err, - } - } - - return entryNotFound -} - -func newIterator(cache *BigCache) *EntryInfoIterator { - elements, count := cache.shards[0].copyHashedKeys() - - return &EntryInfoIterator{ - cache: cache, - currentShard: 0, - currentIndex: -1, - elements: elements, - elementsCount: count, - } -} - -// Value returns current value from the iterator -func (it *EntryInfoIterator) Value() (EntryInfo, error) { - if !it.valid { - return emptyEntryInfo, ErrInvalidIteratorState - } - - return it.currentEntryInfo, it.currentEntryInfo.err -} diff --git 
a/vendor/github.com/allegro/bigcache/v3/logger.go b/vendor/github.com/allegro/bigcache/v3/logger.go deleted file mode 100644 index 50e84ab..0000000 --- a/vendor/github.com/allegro/bigcache/v3/logger.go +++ /dev/null @@ -1,30 +0,0 @@ -package bigcache - -import ( - "log" - "os" -) - -// Logger is invoked when `Config.Verbose=true` -type Logger interface { - Printf(format string, v ...interface{}) -} - -// this is a safeguard, breaking on compile time in case -// `log.Logger` does not adhere to our `Logger` interface. -// see https://golang.org/doc/faq#guarantee_satisfies_interface -var _ Logger = &log.Logger{} - -// DefaultLogger returns a `Logger` implementation -// backed by stdlib's log -func DefaultLogger() *log.Logger { - return log.New(os.Stdout, "", log.LstdFlags) -} - -func newLogger(custom Logger) Logger { - if custom != nil { - return custom - } - - return DefaultLogger() -} diff --git a/vendor/github.com/allegro/bigcache/v3/queue/bytes_queue.go b/vendor/github.com/allegro/bigcache/v3/queue/bytes_queue.go deleted file mode 100644 index 3ef0f6d..0000000 --- a/vendor/github.com/allegro/bigcache/v3/queue/bytes_queue.go +++ /dev/null @@ -1,269 +0,0 @@ -package queue - -import ( - "encoding/binary" - "log" - "time" -) - -const ( - // Number of bytes to encode 0 in uvarint format - minimumHeaderSize = 17 // 1 byte blobsize + timestampSizeInBytes + hashSizeInBytes - // Bytes before left margin are not used. Zero index means element does not exist in queue, useful while reading slice from index - leftMarginIndex = 1 -) - -var ( - errEmptyQueue = &queueError{"Empty queue"} - errInvalidIndex = &queueError{"Index must be greater than zero. Invalid index."} - errIndexOutOfBounds = &queueError{"Index out of range"} -) - -// BytesQueue is a non-thread safe queue type of fifo based on bytes array. -// For every push operation index of entry is returned. It can be used to read the entry later -type BytesQueue struct { - full bool - array []byte - capacity int - maxCapacity int - head int - tail int - count int - rightMargin int - headerBuffer []byte - verbose bool -} - -type queueError struct { - message string -} - -// getNeededSize returns the number of bytes an entry of length need in the queue -func getNeededSize(length int) int { - var header int - switch { - case length < 127: // 1<<7-1 - header = 1 - case length < 16382: // 1<<14-2 - header = 2 - case length < 2097149: // 1<<21 -3 - header = 3 - case length < 268435452: // 1<<28 -4 - header = 4 - default: - header = 5 - } - - return length + header -} - -// NewBytesQueue initialize new bytes queue. -// capacity is used in bytes array allocation -// When verbose flag is set then information about memory allocation are printed -func NewBytesQueue(capacity int, maxCapacity int, verbose bool) *BytesQueue { - return &BytesQueue{ - array: make([]byte, capacity), - capacity: capacity, - maxCapacity: maxCapacity, - headerBuffer: make([]byte, binary.MaxVarintLen32), - tail: leftMarginIndex, - head: leftMarginIndex, - rightMargin: leftMarginIndex, - verbose: verbose, - } -} - -// Reset removes all entries from queue -func (q *BytesQueue) Reset() { - // Just reset indexes - q.tail = leftMarginIndex - q.head = leftMarginIndex - q.rightMargin = leftMarginIndex - q.count = 0 - q.full = false -} - -// Push copies entry at the end of queue and moves tail pointer. Allocates more space if needed. -// Returns index for pushed data or error if maximum size queue limit is reached. 
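`BytesQueue` is the FIFO byte buffer each shard stores its wrapped entries in: `Push` returns a byte offset that the shard's hashmap keeps, while `Get` and `Pop` read entries back by index or in insertion order. A minimal usage sketch, assuming the package is imported directly from the upstream bigcache module (the capacities chosen here are arbitrary):

```go
package main

import (
	"fmt"

	"github.com/allegro/bigcache/v3/queue"
)

func main() {
	// A small queue: 1 KiB initial capacity, 64 KiB hard limit, verbose logging off.
	q := queue.NewBytesQueue(1024, 64*1024, false)

	// Push copies the entry into the underlying byte array and returns its index.
	index, err := q.Push([]byte("hello"))
	if err != nil {
		panic(err)
	}

	// The index can later be used for a random-access read,
	entry, _ := q.Get(index)
	fmt.Printf("Get(%d) = %q, Len() = %d\n", index, entry, q.Len())

	// while Pop removes the oldest entry in FIFO order.
	oldest, _ := q.Pop()
	fmt.Printf("Pop() = %q\n", oldest)
}
```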
-func (q *BytesQueue) Push(data []byte) (int, error) { - neededSize := getNeededSize(len(data)) - - if !q.canInsertAfterTail(neededSize) { - if q.canInsertBeforeHead(neededSize) { - q.tail = leftMarginIndex - } else if q.capacity+neededSize >= q.maxCapacity && q.maxCapacity > 0 { - return -1, &queueError{"Full queue. Maximum size limit reached."} - } else { - q.allocateAdditionalMemory(neededSize) - } - } - - index := q.tail - - q.push(data, neededSize) - - return index, nil -} - -func (q *BytesQueue) allocateAdditionalMemory(minimum int) { - start := time.Now() - if q.capacity < minimum { - q.capacity += minimum - } - q.capacity = q.capacity * 2 - if q.capacity > q.maxCapacity && q.maxCapacity > 0 { - q.capacity = q.maxCapacity - } - - oldArray := q.array - q.array = make([]byte, q.capacity) - - if leftMarginIndex != q.rightMargin { - copy(q.array, oldArray[:q.rightMargin]) - - if q.tail <= q.head { - if q.tail != q.head { - // created slice is slightly larger then need but this is fine after only the needed bytes are copied - q.push(make([]byte, q.head-q.tail), q.head-q.tail) - } - - q.head = leftMarginIndex - q.tail = q.rightMargin - } - } - - q.full = false - - if q.verbose { - log.Printf("Allocated new queue in %s; Capacity: %d \n", time.Since(start), q.capacity) - } -} - -func (q *BytesQueue) push(data []byte, len int) { - headerEntrySize := binary.PutUvarint(q.headerBuffer, uint64(len)) - q.copy(q.headerBuffer, headerEntrySize) - - q.copy(data, len-headerEntrySize) - - if q.tail > q.head { - q.rightMargin = q.tail - } - if q.tail == q.head { - q.full = true - } - - q.count++ -} - -func (q *BytesQueue) copy(data []byte, len int) { - q.tail += copy(q.array[q.tail:], data[:len]) -} - -// Pop reads the oldest entry from queue and moves head pointer to the next one -func (q *BytesQueue) Pop() ([]byte, error) { - data, blockSize, err := q.peek(q.head) - if err != nil { - return nil, err - } - - q.head += blockSize - q.count-- - - if q.head == q.rightMargin { - q.head = leftMarginIndex - if q.tail == q.rightMargin { - q.tail = leftMarginIndex - } - q.rightMargin = q.tail - } - - q.full = false - - return data, nil -} - -// Peek reads the oldest entry from list without moving head pointer -func (q *BytesQueue) Peek() ([]byte, error) { - data, _, err := q.peek(q.head) - return data, err -} - -// Get reads entry from index -func (q *BytesQueue) Get(index int) ([]byte, error) { - data, _, err := q.peek(index) - return data, err -} - -// CheckGet checks if an entry can be read from index -func (q *BytesQueue) CheckGet(index int) error { - return q.peekCheckErr(index) -} - -// Capacity returns number of allocated bytes for queue -func (q *BytesQueue) Capacity() int { - return q.capacity -} - -// Len returns number of entries kept in queue -func (q *BytesQueue) Len() int { - return q.count -} - -// Error returns error message -func (e *queueError) Error() string { - return e.message -} - -// peekCheckErr is identical to peek, but does not actually return any data -func (q *BytesQueue) peekCheckErr(index int) error { - - if q.count == 0 { - return errEmptyQueue - } - - if index <= 0 { - return errInvalidIndex - } - - if index >= len(q.array) { - return errIndexOutOfBounds - } - return nil -} - -// peek returns the data from index and the number of bytes to encode the length of the data in uvarint format -func (q *BytesQueue) peek(index int) ([]byte, int, error) { - err := q.peekCheckErr(index) - if err != nil { - return nil, 0, err - } - - blockSize, n := binary.Uvarint(q.array[index:]) - return 
q.array[index+n : index+int(blockSize)], int(blockSize), nil -} - -// canInsertAfterTail returns true if it's possible to insert an entry of size of need after the tail of the queue -func (q *BytesQueue) canInsertAfterTail(need int) bool { - if q.full { - return false - } - if q.tail >= q.head { - return q.capacity-q.tail >= need - } - // 1. there is exactly need bytes between head and tail, so we do not need - // to reserve extra space for a potential empty entry when realloc this queue - // 2. still have unused space between tail and head, then we must reserve - // at least headerEntrySize bytes so we can put an empty entry - return q.head-q.tail == need || q.head-q.tail >= need+minimumHeaderSize -} - -// canInsertBeforeHead returns true if it's possible to insert an entry of size of need before the head of the queue -func (q *BytesQueue) canInsertBeforeHead(need int) bool { - if q.full { - return false - } - if q.tail >= q.head { - return q.head-leftMarginIndex == need || q.head-leftMarginIndex >= need+minimumHeaderSize - } - return q.head-q.tail == need || q.head-q.tail >= need+minimumHeaderSize -} diff --git a/vendor/github.com/allegro/bigcache/v3/shard.go b/vendor/github.com/allegro/bigcache/v3/shard.go deleted file mode 100644 index e80a759..0000000 --- a/vendor/github.com/allegro/bigcache/v3/shard.go +++ /dev/null @@ -1,454 +0,0 @@ -package bigcache - -import ( - "fmt" - "sync" - "sync/atomic" - - "github.com/allegro/bigcache/v3/queue" -) - -type onRemoveCallback func(wrappedEntry []byte, reason RemoveReason) - -// Metadata contains information of a specific entry -type Metadata struct { - RequestCount uint32 -} - -type cacheShard struct { - hashmap map[uint64]uint32 - entries queue.BytesQueue - lock sync.RWMutex - entryBuffer []byte - onRemove onRemoveCallback - - isVerbose bool - statsEnabled bool - logger Logger - clock clock - lifeWindow uint64 - - hashmapStats map[uint64]uint32 - stats Stats - cleanEnabled bool -} - -func (s *cacheShard) getWithInfo(key string, hashedKey uint64) (entry []byte, resp Response, err error) { - currentTime := uint64(s.clock.Epoch()) - s.lock.RLock() - wrappedEntry, err := s.getWrappedEntry(hashedKey) - if err != nil { - s.lock.RUnlock() - return nil, resp, err - } - if entryKey := readKeyFromEntry(wrappedEntry); key != entryKey { - s.lock.RUnlock() - s.collision() - if s.isVerbose { - s.logger.Printf("Collision detected. Both %q and %q have the same hash %x", key, entryKey, hashedKey) - } - return nil, resp, ErrEntryNotFound - } - - entry = readEntry(wrappedEntry) - if s.isExpired(wrappedEntry, currentTime) { - resp.EntryStatus = Expired - } - s.lock.RUnlock() - s.hit(hashedKey) - return entry, resp, nil -} - -func (s *cacheShard) get(key string, hashedKey uint64) ([]byte, error) { - s.lock.RLock() - wrappedEntry, err := s.getWrappedEntry(hashedKey) - if err != nil { - s.lock.RUnlock() - return nil, err - } - if entryKey := readKeyFromEntry(wrappedEntry); key != entryKey { - s.lock.RUnlock() - s.collision() - if s.isVerbose { - s.logger.Printf("Collision detected. 
Both %q and %q have the same hash %x", key, entryKey, hashedKey) - } - return nil, ErrEntryNotFound - } - entry := readEntry(wrappedEntry) - s.lock.RUnlock() - s.hit(hashedKey) - - return entry, nil -} - -func (s *cacheShard) getWrappedEntry(hashedKey uint64) ([]byte, error) { - itemIndex := s.hashmap[hashedKey] - - if itemIndex == 0 { - s.miss() - return nil, ErrEntryNotFound - } - - wrappedEntry, err := s.entries.Get(int(itemIndex)) - if err != nil { - s.miss() - return nil, err - } - - return wrappedEntry, err -} - -func (s *cacheShard) getValidWrapEntry(key string, hashedKey uint64) ([]byte, error) { - wrappedEntry, err := s.getWrappedEntry(hashedKey) - if err != nil { - return nil, err - } - - if !compareKeyFromEntry(wrappedEntry, key) { - s.collision() - if s.isVerbose { - s.logger.Printf("Collision detected. Both %q and %q have the same hash %x", key, readKeyFromEntry(wrappedEntry), hashedKey) - } - - return nil, ErrEntryNotFound - } - s.hitWithoutLock(hashedKey) - - return wrappedEntry, nil -} - -func (s *cacheShard) set(key string, hashedKey uint64, entry []byte) error { - currentTimestamp := uint64(s.clock.Epoch()) - - s.lock.Lock() - - if previousIndex := s.hashmap[hashedKey]; previousIndex != 0 { - if previousEntry, err := s.entries.Get(int(previousIndex)); err == nil { - resetKeyFromEntry(previousEntry) - //remove hashkey - delete(s.hashmap, hashedKey) - } - } - - if !s.cleanEnabled { - if oldestEntry, err := s.entries.Peek(); err == nil { - s.onEvict(oldestEntry, currentTimestamp, s.removeOldestEntry) - } - } - - w := wrapEntry(currentTimestamp, hashedKey, key, entry, &s.entryBuffer) - - for { - if index, err := s.entries.Push(w); err == nil { - s.hashmap[hashedKey] = uint32(index) - s.lock.Unlock() - return nil - } - if s.removeOldestEntry(NoSpace) != nil { - s.lock.Unlock() - return fmt.Errorf("entry is bigger than max shard size") - } - } -} - -func (s *cacheShard) addNewWithoutLock(key string, hashedKey uint64, entry []byte) error { - currentTimestamp := uint64(s.clock.Epoch()) - - if !s.cleanEnabled { - if oldestEntry, err := s.entries.Peek(); err == nil { - s.onEvict(oldestEntry, currentTimestamp, s.removeOldestEntry) - } - } - - w := wrapEntry(currentTimestamp, hashedKey, key, entry, &s.entryBuffer) - - for { - if index, err := s.entries.Push(w); err == nil { - s.hashmap[hashedKey] = uint32(index) - return nil - } - if s.removeOldestEntry(NoSpace) != nil { - return fmt.Errorf("entry is bigger than max shard size") - } - } -} - -func (s *cacheShard) setWrappedEntryWithoutLock(currentTimestamp uint64, w []byte, hashedKey uint64) error { - if previousIndex := s.hashmap[hashedKey]; previousIndex != 0 { - if previousEntry, err := s.entries.Get(int(previousIndex)); err == nil { - resetKeyFromEntry(previousEntry) - } - } - - if !s.cleanEnabled { - if oldestEntry, err := s.entries.Peek(); err == nil { - s.onEvict(oldestEntry, currentTimestamp, s.removeOldestEntry) - } - } - - for { - if index, err := s.entries.Push(w); err == nil { - s.hashmap[hashedKey] = uint32(index) - return nil - } - if s.removeOldestEntry(NoSpace) != nil { - return fmt.Errorf("entry is bigger than max shard size") - } - } -} - -func (s *cacheShard) append(key string, hashedKey uint64, entry []byte) error { - s.lock.Lock() - wrappedEntry, err := s.getValidWrapEntry(key, hashedKey) - - if err == ErrEntryNotFound { - err = s.addNewWithoutLock(key, hashedKey, entry) - s.lock.Unlock() - return err - } - if err != nil { - s.lock.Unlock() - return err - } - - currentTimestamp := uint64(s.clock.Epoch()) - - w := 
appendToWrappedEntry(currentTimestamp, wrappedEntry, entry, &s.entryBuffer) - - err = s.setWrappedEntryWithoutLock(currentTimestamp, w, hashedKey) - s.lock.Unlock() - - return err -} - -func (s *cacheShard) del(hashedKey uint64) error { - // Optimistic pre-check using only readlock - s.lock.RLock() - { - itemIndex := s.hashmap[hashedKey] - - if itemIndex == 0 { - s.lock.RUnlock() - s.delmiss() - return ErrEntryNotFound - } - - if err := s.entries.CheckGet(int(itemIndex)); err != nil { - s.lock.RUnlock() - s.delmiss() - return err - } - } - s.lock.RUnlock() - - s.lock.Lock() - { - // After obtaining the writelock, we need to read the same again, - // since the data delivered earlier may be stale now - itemIndex := s.hashmap[hashedKey] - - if itemIndex == 0 { - s.lock.Unlock() - s.delmiss() - return ErrEntryNotFound - } - - wrappedEntry, err := s.entries.Get(int(itemIndex)) - if err != nil { - s.lock.Unlock() - s.delmiss() - return err - } - - delete(s.hashmap, hashedKey) - s.onRemove(wrappedEntry, Deleted) - if s.statsEnabled { - delete(s.hashmapStats, hashedKey) - } - resetKeyFromEntry(wrappedEntry) - } - s.lock.Unlock() - - s.delhit() - return nil -} - -func (s *cacheShard) onEvict(oldestEntry []byte, currentTimestamp uint64, evict func(reason RemoveReason) error) bool { - if s.isExpired(oldestEntry, currentTimestamp) { - evict(Expired) - return true - } - return false -} - -func (s *cacheShard) isExpired(oldestEntry []byte, currentTimestamp uint64) bool { - oldestTimestamp := readTimestampFromEntry(oldestEntry) - if currentTimestamp <= oldestTimestamp { // if currentTimestamp < oldestTimestamp, the result will out of uint64 limits; - return false - } - return currentTimestamp-oldestTimestamp > s.lifeWindow -} - -func (s *cacheShard) cleanUp(currentTimestamp uint64) { - s.lock.Lock() - for { - if oldestEntry, err := s.entries.Peek(); err != nil { - break - } else if evicted := s.onEvict(oldestEntry, currentTimestamp, s.removeOldestEntry); !evicted { - break - } - } - s.lock.Unlock() -} - -func (s *cacheShard) getEntry(hashedKey uint64) ([]byte, error) { - s.lock.RLock() - - entry, err := s.getWrappedEntry(hashedKey) - // copy entry - newEntry := make([]byte, len(entry)) - copy(newEntry, entry) - - s.lock.RUnlock() - - return newEntry, err -} - -func (s *cacheShard) copyHashedKeys() (keys []uint64, next int) { - s.lock.RLock() - keys = make([]uint64, len(s.hashmap)) - - for key := range s.hashmap { - keys[next] = key - next++ - } - - s.lock.RUnlock() - return keys, next -} - -func (s *cacheShard) removeOldestEntry(reason RemoveReason) error { - oldest, err := s.entries.Pop() - if err == nil { - hash := readHashFromEntry(oldest) - if hash == 0 { - // entry has been explicitly deleted with resetKeyFromEntry, ignore - return nil - } - delete(s.hashmap, hash) - s.onRemove(oldest, reason) - if s.statsEnabled { - delete(s.hashmapStats, hash) - } - return nil - } - return err -} - -func (s *cacheShard) reset(config Config) { - s.lock.Lock() - s.hashmap = make(map[uint64]uint32, config.initialShardSize()) - s.entryBuffer = make([]byte, config.MaxEntrySize+headersSizeInBytes) - s.entries.Reset() - s.lock.Unlock() -} - -func (s *cacheShard) resetStats() { - s.lock.Lock() - s.stats = Stats{} - s.lock.Unlock() -} - -func (s *cacheShard) len() int { - s.lock.RLock() - res := len(s.hashmap) - s.lock.RUnlock() - return res -} - -func (s *cacheShard) capacity() int { - s.lock.RLock() - res := s.entries.Capacity() - s.lock.RUnlock() - return res -} - -func (s *cacheShard) getStats() Stats { - var stats = 
Stats{ - Hits: atomic.LoadInt64(&s.stats.Hits), - Misses: atomic.LoadInt64(&s.stats.Misses), - DelHits: atomic.LoadInt64(&s.stats.DelHits), - DelMisses: atomic.LoadInt64(&s.stats.DelMisses), - Collisions: atomic.LoadInt64(&s.stats.Collisions), - } - return stats -} - -func (s *cacheShard) getKeyMetadataWithLock(key uint64) Metadata { - s.lock.RLock() - c := s.hashmapStats[key] - s.lock.RUnlock() - return Metadata{ - RequestCount: c, - } -} - -func (s *cacheShard) getKeyMetadata(key uint64) Metadata { - return Metadata{ - RequestCount: s.hashmapStats[key], - } -} - -func (s *cacheShard) hit(key uint64) { - atomic.AddInt64(&s.stats.Hits, 1) - if s.statsEnabled { - s.lock.Lock() - s.hashmapStats[key]++ - s.lock.Unlock() - } -} - -func (s *cacheShard) hitWithoutLock(key uint64) { - atomic.AddInt64(&s.stats.Hits, 1) - if s.statsEnabled { - s.hashmapStats[key]++ - } -} - -func (s *cacheShard) miss() { - atomic.AddInt64(&s.stats.Misses, 1) -} - -func (s *cacheShard) delhit() { - atomic.AddInt64(&s.stats.DelHits, 1) -} - -func (s *cacheShard) delmiss() { - atomic.AddInt64(&s.stats.DelMisses, 1) -} - -func (s *cacheShard) collision() { - atomic.AddInt64(&s.stats.Collisions, 1) -} - -func initNewShard(config Config, callback onRemoveCallback, clock clock) *cacheShard { - bytesQueueInitialCapacity := config.initialShardSize() * config.MaxEntrySize - maximumShardSizeInBytes := config.maximumShardSizeInBytes() - if maximumShardSizeInBytes > 0 && bytesQueueInitialCapacity > maximumShardSizeInBytes { - bytesQueueInitialCapacity = maximumShardSizeInBytes - } - return &cacheShard{ - hashmap: make(map[uint64]uint32, config.initialShardSize()), - hashmapStats: make(map[uint64]uint32, config.initialShardSize()), - entries: *queue.NewBytesQueue(bytesQueueInitialCapacity, maximumShardSizeInBytes, config.Verbose), - entryBuffer: make([]byte, config.MaxEntrySize+headersSizeInBytes), - onRemove: callback, - - isVerbose: config.Verbose, - logger: newLogger(config.Logger), - clock: clock, - lifeWindow: uint64(config.LifeWindow.Seconds()), - statsEnabled: config.StatsEnabled, - cleanEnabled: config.CleanWindow > 0, - } -} diff --git a/vendor/github.com/allegro/bigcache/v3/stats.go b/vendor/github.com/allegro/bigcache/v3/stats.go deleted file mode 100644 index 0715713..0000000 --- a/vendor/github.com/allegro/bigcache/v3/stats.go +++ /dev/null @@ -1,15 +0,0 @@ -package bigcache - -// Stats stores cache statistics -type Stats struct { - // Hits is a number of successfully found keys - Hits int64 `json:"hits"` - // Misses is a number of not found keys - Misses int64 `json:"misses"` - // DelHits is a number of successfully deleted keys - DelHits int64 `json:"delete_hits"` - // DelMisses is a number of not deleted keys - DelMisses int64 `json:"delete_misses"` - // Collisions is a number of happened key-collisions - Collisions int64 `json:"collisions"` -} diff --git a/vendor/github.com/allegro/bigcache/v3/utils.go b/vendor/github.com/allegro/bigcache/v3/utils.go deleted file mode 100644 index 2b6ac4f..0000000 --- a/vendor/github.com/allegro/bigcache/v3/utils.go +++ /dev/null @@ -1,23 +0,0 @@ -package bigcache - -func max(a, b int) int { - if a > b { - return a - } - return b -} - -func min(a, b int) int { - if a < b { - return a - } - return b -} - -func convertMBToBytes(value int) int { - return value * 1024 * 1024 -} - -func isPowerOfTwo(number int) bool { - return (number != 0) && (number&(number-1)) == 0 -} diff --git a/vendor/github.com/beorn7/perks/LICENSE b/vendor/github.com/beorn7/perks/LICENSE deleted file mode 
100644 index 339177b..0000000 --- a/vendor/github.com/beorn7/perks/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -Copyright (C) 2013 Blake Mizerany - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/beorn7/perks/quantile/exampledata.txt b/vendor/github.com/beorn7/perks/quantile/exampledata.txt deleted file mode 100644 index 1602287..0000000 --- a/vendor/github.com/beorn7/perks/quantile/exampledata.txt +++ /dev/null @@ -1,2388 +0,0 @@ -8 -5 -26 -12 -5 -235 -13 -6 -28 -30 -3 -3 -3 -3 -5 -2 -33 -7 -2 -4 -7 -12 -14 -5 -8 -3 -10 -4 -5 -3 -6 -6 -209 -20 -3 -10 -14 -3 -4 -6 -8 -5 -11 -7 -3 -2 -3 -3 -212 -5 -222 -4 -10 -10 -5 -6 -3 -8 -3 -10 -254 -220 -2 -3 -5 -24 -5 -4 -222 -7 -3 -3 -223 -8 -15 -12 -14 -14 -3 -2 -2 -3 -13 -3 -11 -4 -4 -6 -5 -7 -13 -5 -3 -5 -2 -5 -3 -5 -2 -7 -15 -17 -14 -3 -6 -6 -3 -17 -5 -4 -7 -6 -4 -4 -8 -6 -8 -3 -9 -3 -6 -3 -4 -5 -3 -3 -660 -4 -6 -10 -3 -6 -3 -2 -5 -13 -2 -4 -4 -10 -4 -8 -4 -3 -7 -9 -9 -3 -10 -37 -3 -13 -4 -12 -3 -6 -10 -8 -5 -21 -2 -3 -8 -3 -2 -3 -3 -4 -12 -2 -4 -8 -8 -4 -3 -2 -20 -1 -6 -32 -2 -11 -6 -18 -3 -8 -11 -3 -212 -3 -4 -2 -6 -7 -12 -11 -3 -2 -16 -10 -6 -4 -6 -3 -2 -7 -3 -2 -2 -2 -2 -5 -6 -4 -3 -10 -3 -4 -6 -5 -3 -4 -4 -5 -6 -4 -3 -4 -4 -5 -7 -5 -5 -3 -2 -7 -2 -4 -12 -4 -5 -6 -2 -4 -4 -8 -4 -15 -13 -7 -16 -5 -3 -23 -5 -5 -7 -3 -2 -9 -8 -7 -5 -8 -11 -4 -10 -76 -4 -47 -4 -3 -2 -7 -4 -2 -3 -37 -10 -4 -2 -20 -5 -4 -4 -10 -10 -4 -3 -7 -23 -240 -7 -13 -5 -5 -3 -3 -2 -5 -4 -2 -8 -7 -19 -2 -23 -8 -7 -2 -5 -3 -8 -3 -8 -13 -5 -5 -5 -2 -3 -23 -4 -9 -8 -4 -3 -3 -5 -220 -2 -3 -4 -6 -14 -3 -53 -6 -2 -5 -18 -6 -3 -219 -6 -5 -2 -5 -3 -6 -5 -15 -4 -3 -17 -3 -2 -4 -7 -2 -3 -3 -4 -4 -3 -2 -664 -6 -3 -23 -5 -5 -16 -5 -8 -2 -4 -2 -24 -12 -3 -2 -3 -5 -8 -3 -5 -4 -3 -14 -3 -5 -8 -2 -3 -7 -9 -4 -2 -3 -6 -8 -4 -3 -4 -6 -5 -3 -3 -6 -3 -19 -4 -4 -6 -3 -6 -3 -5 -22 -5 -4 -4 -3 -8 -11 -4 -9 -7 -6 -13 -4 -4 -4 -6 -17 -9 -3 -3 -3 -4 -3 -221 -5 -11 -3 -4 -2 -12 -6 -3 -5 -7 -5 -7 -4 -9 -7 -14 -37 -19 -217 -16 -3 -5 -2 -2 -7 -19 -7 -6 -7 -4 -24 -5 -11 -4 -7 -7 -9 -13 -3 -4 -3 -6 -28 -4 -4 -5 -5 -2 -5 -6 -4 -4 -6 -10 -5 -4 -3 -2 -3 -3 -6 -5 -5 -4 -3 -2 -3 -7 -4 -6 -18 -16 -8 -16 -4 -5 -8 -6 -9 -13 -1545 -6 -215 -6 -5 -6 -3 -45 -31 -5 -2 -2 -4 -3 -3 -2 -5 -4 -3 -5 -7 -7 -4 -5 -8 -5 -4 -749 -2 -31 -9 -11 -2 -11 -5 -4 -4 -7 -9 -11 -4 -5 -4 -7 -3 -4 -6 -2 -15 -3 -4 -3 -4 -3 -5 -2 -13 -5 -5 -3 -3 -23 -4 -4 -5 -7 -4 -13 -2 -4 -3 -4 -2 -6 -2 -7 -3 -5 -5 -3 -29 -5 -4 -4 -3 -10 -2 -3 -79 -16 -6 -6 -7 -7 -3 -5 -5 -7 -4 -3 -7 -9 -5 -6 -5 -9 -6 -3 -6 -4 -17 -2 -10 -9 -3 -6 -2 -3 -21 -22 -5 -11 -4 -2 
-17 -2 -224 -2 -14 -3 -4 -4 -2 -4 -4 -4 -4 -5 -3 -4 -4 -10 -2 -6 -3 -3 -5 -7 -2 -7 -5 -6 -3 -218 -2 -2 -5 -2 -6 -3 -5 -222 -14 -6 -33 -3 -2 -5 -3 -3 -3 -9 -5 -3 -3 -2 -7 -4 -3 -4 -3 -5 -6 -5 -26 -4 -13 -9 -7 -3 -221 -3 -3 -4 -4 -4 -4 -2 -18 -5 -3 -7 -9 -6 -8 -3 -10 -3 -11 -9 -5 -4 -17 -5 -5 -6 -6 -3 -2 -4 -12 -17 -6 -7 -218 -4 -2 -4 -10 -3 -5 -15 -3 -9 -4 -3 -3 -6 -29 -3 -3 -4 -5 -5 -3 -8 -5 -6 -6 -7 -5 -3 -5 -3 -29 -2 -31 -5 -15 -24 -16 -5 -207 -4 -3 -3 -2 -15 -4 -4 -13 -5 -5 -4 -6 -10 -2 -7 -8 -4 -6 -20 -5 -3 -4 -3 -12 -12 -5 -17 -7 -3 -3 -3 -6 -10 -3 -5 -25 -80 -4 -9 -3 -2 -11 -3 -3 -2 -3 -8 -7 -5 -5 -19 -5 -3 -3 -12 -11 -2 -6 -5 -5 -5 -3 -3 -3 -4 -209 -14 -3 -2 -5 -19 -4 -4 -3 -4 -14 -5 -6 -4 -13 -9 -7 -4 -7 -10 -2 -9 -5 -7 -2 -8 -4 -6 -5 -5 -222 -8 -7 -12 -5 -216 -3 -4 -4 -6 -3 -14 -8 -7 -13 -4 -3 -3 -3 -3 -17 -5 -4 -3 -33 -6 -6 -33 -7 -5 -3 -8 -7 -5 -2 -9 -4 -2 -233 -24 -7 -4 -8 -10 -3 -4 -15 -2 -16 -3 -3 -13 -12 -7 -5 -4 -207 -4 -2 -4 -27 -15 -2 -5 -2 -25 -6 -5 -5 -6 -13 -6 -18 -6 -4 -12 -225 -10 -7 -5 -2 -2 -11 -4 -14 -21 -8 -10 -3 -5 -4 -232 -2 -5 -5 -3 -7 -17 -11 -6 -6 -23 -4 -6 -3 -5 -4 -2 -17 -3 -6 -5 -8 -3 -2 -2 -14 -9 -4 -4 -2 -5 -5 -3 -7 -6 -12 -6 -10 -3 -6 -2 -2 -19 -5 -4 -4 -9 -2 -4 -13 -3 -5 -6 -3 -6 -5 -4 -9 -6 -3 -5 -7 -3 -6 -6 -4 -3 -10 -6 -3 -221 -3 -5 -3 -6 -4 -8 -5 -3 -6 -4 -4 -2 -54 -5 -6 -11 -3 -3 -4 -4 -4 -3 -7 -3 -11 -11 -7 -10 -6 -13 -223 -213 -15 -231 -7 -3 -7 -228 -2 -3 -4 -4 -5 -6 -7 -4 -13 -3 -4 -5 -3 -6 -4 -6 -7 -2 -4 -3 -4 -3 -3 -6 -3 -7 -3 -5 -18 -5 -6 -8 -10 -3 -3 -3 -2 -4 -2 -4 -4 -5 -6 -6 -4 -10 -13 -3 -12 -5 -12 -16 -8 -4 -19 -11 -2 -4 -5 -6 -8 -5 -6 -4 -18 -10 -4 -2 -216 -6 -6 -6 -2 -4 -12 -8 -3 -11 -5 -6 -14 -5 -3 -13 -4 -5 -4 -5 -3 -28 -6 -3 -7 -219 -3 -9 -7 -3 -10 -6 -3 -4 -19 -5 -7 -11 -6 -15 -19 -4 -13 -11 -3 -7 -5 -10 -2 -8 -11 -2 -6 -4 -6 -24 -6 -3 -3 -3 -3 -6 -18 -4 -11 -4 -2 -5 -10 -8 -3 -9 -5 -3 -4 -5 -6 -2 -5 -7 -4 -4 -14 -6 -4 -4 -5 -5 -7 -2 -4 -3 -7 -3 -3 -6 -4 -5 -4 -4 -4 -3 -3 -3 -3 -8 -14 -2 -3 -5 -3 -2 -4 -5 -3 -7 -3 -3 -18 -3 -4 -4 -5 -7 -3 -3 -3 -13 -5 -4 -8 -211 -5 -5 -3 -5 -2 -5 -4 -2 -655 -6 -3 -5 -11 -2 -5 -3 -12 -9 -15 -11 -5 -12 -217 -2 -6 -17 -3 -3 -207 -5 -5 -4 -5 -9 -3 -2 -8 -5 -4 -3 -2 -5 -12 -4 -14 -5 -4 -2 -13 -5 -8 -4 -225 -4 -3 -4 -5 -4 -3 -3 -6 -23 -9 -2 -6 -7 -233 -4 -4 -6 -18 -3 -4 -6 -3 -4 -4 -2 -3 -7 -4 -13 -227 -4 -3 -5 -4 -2 -12 -9 -17 -3 -7 -14 -6 -4 -5 -21 -4 -8 -9 -2 -9 -25 -16 -3 -6 -4 -7 -8 -5 -2 -3 -5 -4 -3 -3 -5 -3 -3 -3 -2 -3 -19 -2 -4 -3 -4 -2 -3 -4 -4 -2 -4 -3 -3 -3 -2 -6 -3 -17 -5 -6 -4 -3 -13 -5 -3 -3 -3 -4 -9 -4 -2 -14 -12 -4 -5 -24 -4 -3 -37 -12 -11 -21 -3 -4 -3 -13 -4 -2 -3 -15 -4 -11 -4 -4 -3 -8 -3 -4 -4 -12 -8 -5 -3 -3 -4 -2 -220 -3 -5 -223 -3 -3 -3 -10 -3 -15 -4 -241 -9 -7 -3 -6 -6 -23 -4 -13 -7 -3 -4 -7 -4 -9 -3 -3 -4 -10 -5 -5 -1 -5 -24 -2 -4 -5 -5 -6 -14 -3 -8 -2 -3 -5 -13 -13 -3 -5 -2 -3 -15 -3 -4 -2 -10 -4 -4 -4 -5 -5 -3 -5 -3 -4 -7 -4 -27 -3 -6 -4 -15 -3 -5 -6 -6 -5 -4 -8 -3 -9 -2 -6 -3 -4 -3 -7 -4 -18 -3 -11 -3 -3 -8 -9 -7 -24 -3 -219 -7 -10 -4 -5 -9 -12 -2 -5 -4 -4 -4 -3 -3 -19 -5 -8 -16 -8 -6 -22 -3 -23 -3 -242 -9 -4 -3 -3 -5 -7 -3 -3 -5 -8 -3 -7 -5 -14 -8 -10 -3 -4 -3 -7 -4 -6 -7 -4 -10 -4 -3 -11 -3 -7 -10 -3 -13 -6 -8 -12 -10 -5 -7 -9 -3 -4 -7 -7 -10 -8 -30 -9 -19 -4 -3 -19 -15 -4 -13 -3 -215 -223 -4 -7 -4 -8 -17 -16 -3 -7 -6 -5 -5 -4 -12 -3 -7 -4 -4 -13 -4 -5 -2 -5 -6 -5 -6 -6 -7 -10 -18 -23 -9 -3 -3 -6 -5 -2 -4 -2 -7 -3 -3 -2 -5 -5 -14 -10 -224 -6 -3 -4 -3 -7 -5 -9 -3 -6 -4 -2 -5 -11 -4 -3 -3 -2 -8 -4 -7 -4 -10 -7 -3 -3 -18 -18 -17 -3 -3 -3 -4 -5 -3 -3 -4 -12 -7 -3 -11 -13 -5 -4 
-7 -13 -5 -4 -11 -3 -12 -3 -6 -4 -4 -21 -4 -6 -9 -5 -3 -10 -8 -4 -6 -4 -4 -6 -5 -4 -8 -6 -4 -6 -4 -4 -5 -9 -6 -3 -4 -2 -9 -3 -18 -2 -4 -3 -13 -3 -6 -6 -8 -7 -9 -3 -2 -16 -3 -4 -6 -3 -2 -33 -22 -14 -4 -9 -12 -4 -5 -6 -3 -23 -9 -4 -3 -5 -5 -3 -4 -5 -3 -5 -3 -10 -4 -5 -5 -8 -4 -4 -6 -8 -5 -4 -3 -4 -6 -3 -3 -3 -5 -9 -12 -6 -5 -9 -3 -5 -3 -2 -2 -2 -18 -3 -2 -21 -2 -5 -4 -6 -4 -5 -10 -3 -9 -3 -2 -10 -7 -3 -6 -6 -4 -4 -8 -12 -7 -3 -7 -3 -3 -9 -3 -4 -5 -4 -4 -5 -5 -10 -15 -4 -4 -14 -6 -227 -3 -14 -5 -216 -22 -5 -4 -2 -2 -6 -3 -4 -2 -9 -9 -4 -3 -28 -13 -11 -4 -5 -3 -3 -2 -3 -3 -5 -3 -4 -3 -5 -23 -26 -3 -4 -5 -6 -4 -6 -3 -5 -5 -3 -4 -3 -2 -2 -2 -7 -14 -3 -6 -7 -17 -2 -2 -15 -14 -16 -4 -6 -7 -13 -6 -4 -5 -6 -16 -3 -3 -28 -3 -6 -15 -3 -9 -2 -4 -6 -3 -3 -22 -4 -12 -6 -7 -2 -5 -4 -10 -3 -16 -6 -9 -2 -5 -12 -7 -5 -5 -5 -5 -2 -11 -9 -17 -4 -3 -11 -7 -3 -5 -15 -4 -3 -4 -211 -8 -7 -5 -4 -7 -6 -7 -6 -3 -6 -5 -6 -5 -3 -4 -4 -26 -4 -6 -10 -4 -4 -3 -2 -3 -3 -4 -5 -9 -3 -9 -4 -4 -5 -5 -8 -2 -4 -2 -3 -8 -4 -11 -19 -5 -8 -6 -3 -5 -6 -12 -3 -2 -4 -16 -12 -3 -4 -4 -8 -6 -5 -6 -6 -219 -8 -222 -6 -16 -3 -13 -19 -5 -4 -3 -11 -6 -10 -4 -7 -7 -12 -5 -3 -3 -5 -6 -10 -3 -8 -2 -5 -4 -7 -2 -4 -4 -2 -12 -9 -6 -4 -2 -40 -2 -4 -10 -4 -223 -4 -2 -20 -6 -7 -24 -5 -4 -5 -2 -20 -16 -6 -5 -13 -2 -3 -3 -19 -3 -2 -4 -5 -6 -7 -11 -12 -5 -6 -7 -7 -3 -5 -3 -5 -3 -14 -3 -4 -4 -2 -11 -1 -7 -3 -9 -6 -11 -12 -5 -8 -6 -221 -4 -2 -12 -4 -3 -15 -4 -5 -226 -7 -218 -7 -5 -4 -5 -18 -4 -5 -9 -4 -4 -2 -9 -18 -18 -9 -5 -6 -6 -3 -3 -7 -3 -5 -4 -4 -4 -12 -3 -6 -31 -5 -4 -7 -3 -6 -5 -6 -5 -11 -2 -2 -11 -11 -6 -7 -5 -8 -7 -10 -5 -23 -7 -4 -3 -5 -34 -2 -5 -23 -7 -3 -6 -8 -4 -4 -4 -2 -5 -3 -8 -5 -4 -8 -25 -2 -3 -17 -8 -3 -4 -8 -7 -3 -15 -6 -5 -7 -21 -9 -5 -6 -6 -5 -3 -2 -3 -10 -3 -6 -3 -14 -7 -4 -4 -8 -7 -8 -2 -6 -12 -4 -213 -6 -5 -21 -8 -2 -5 -23 -3 -11 -2 -3 -6 -25 -2 -3 -6 -7 -6 -6 -4 -4 -6 -3 -17 -9 -7 -6 -4 -3 -10 -7 -2 -3 -3 -3 -11 -8 -3 -7 -6 -4 -14 -36 -3 -4 -3 -3 -22 -13 -21 -4 -2 -7 -4 -4 -17 -15 -3 -7 -11 -2 -4 -7 -6 -209 -6 -3 -2 -2 -24 -4 -9 -4 -3 -3 -3 -29 -2 -2 -4 -3 -3 -5 -4 -6 -3 -3 -2 -4 diff --git a/vendor/github.com/beorn7/perks/quantile/stream.go b/vendor/github.com/beorn7/perks/quantile/stream.go deleted file mode 100644 index d7d14f8..0000000 --- a/vendor/github.com/beorn7/perks/quantile/stream.go +++ /dev/null @@ -1,316 +0,0 @@ -// Package quantile computes approximate quantiles over an unbounded data -// stream within low memory and CPU bounds. -// -// A small amount of accuracy is traded to achieve the above properties. -// -// Multiple streams can be merged before calling Query to generate a single set -// of results. This is meaningful when the streams represent the same type of -// data. See Merge and Samples. -// -// For more detailed information about the algorithm used, see: -// -// Effective Computation of Biased Quantiles over Data Streams -// -// http://www.cs.rutgers.edu/~muthu/bquant.pdf -package quantile - -import ( - "math" - "sort" -) - -// Sample holds an observed value and meta information for compression. JSON -// tags have been added for convenience. -type Sample struct { - Value float64 `json:",string"` - Width float64 `json:",string"` - Delta float64 `json:",string"` -} - -// Samples represents a slice of samples. It implements sort.Interface. 
-type Samples []Sample - -func (a Samples) Len() int { return len(a) } -func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value } -func (a Samples) Swap(i, j int) { a[i], a[j] = a[j], a[i] } - -type invariant func(s *stream, r float64) float64 - -// NewLowBiased returns an initialized Stream for low-biased quantiles -// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but -// error guarantees can still be given even for the lower ranks of the data -// distribution. -// -// The provided epsilon is a relative error, i.e. the true quantile of a value -// returned by a query is guaranteed to be within (1±Epsilon)*Quantile. -// -// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error -// properties. -func NewLowBiased(epsilon float64) *Stream { - ƒ := func(s *stream, r float64) float64 { - return 2 * epsilon * r - } - return newStream(ƒ) -} - -// NewHighBiased returns an initialized Stream for high-biased quantiles -// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but -// error guarantees can still be given even for the higher ranks of the data -// distribution. -// -// The provided epsilon is a relative error, i.e. the true quantile of a value -// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile). -// -// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error -// properties. -func NewHighBiased(epsilon float64) *Stream { - ƒ := func(s *stream, r float64) float64 { - return 2 * epsilon * (s.n - r) - } - return newStream(ƒ) -} - -// NewTargeted returns an initialized Stream concerned with a particular set of -// quantile values that are supplied a priori. Knowing these a priori reduces -// space and computation time. The targets map maps the desired quantiles to -// their absolute errors, i.e. the true quantile of a value returned by a query -// is guaranteed to be within (Quantile±Epsilon). -// -// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties. -func NewTargeted(targetMap map[float64]float64) *Stream { - // Convert map to slice to avoid slow iterations on a map. - // ƒ is called on the hot path, so converting the map to a slice - // beforehand results in significant CPU savings. - targets := targetMapToSlice(targetMap) - - ƒ := func(s *stream, r float64) float64 { - var m = math.MaxFloat64 - var f float64 - for _, t := range targets { - if t.quantile*s.n <= r { - f = (2 * t.epsilon * r) / t.quantile - } else { - f = (2 * t.epsilon * (s.n - r)) / (1 - t.quantile) - } - if f < m { - m = f - } - } - return m - } - return newStream(ƒ) -} - -type target struct { - quantile float64 - epsilon float64 -} - -func targetMapToSlice(targetMap map[float64]float64) []target { - targets := make([]target, 0, len(targetMap)) - - for quantile, epsilon := range targetMap { - t := target{ - quantile: quantile, - epsilon: epsilon, - } - targets = append(targets, t) - } - - return targets -} - -// Stream computes quantiles for a stream of float64s. It is not thread-safe by -// design. Take care when using across multiple goroutines. -type Stream struct { - *stream - b Samples - sorted bool -} - -func newStream(ƒ invariant) *Stream { - x := &stream{ƒ: ƒ} - return &Stream{x, make(Samples, 0, 500), true} -} - -// Insert inserts v into the stream. 
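`Stream` is consumed through a small API: pick an invariant with `NewTargeted` (or the biased constructors above), feed observations with `Insert`, and read estimates back with `Query`. This package reaches gocache only transitively, via the Prometheus client, which uses it for summary quantiles. A minimal sketch assuming direct use; the target quantiles, error bounds, and input values are arbitrary:

```go
package main

import (
	"fmt"
	"math/rand"

	"github.com/beorn7/perks/quantile"
)

func main() {
	// Track the median and the 99th percentile, with absolute errors
	// supplied a priori as described for NewTargeted above.
	targets := map[float64]float64{
		0.50: 0.05,
		0.99: 0.001,
	}
	s := quantile.NewTargeted(targets)

	// Insert a stream of observations; the distribution here is arbitrary.
	for i := 0; i < 10000; i++ {
		s.Insert(rand.NormFloat64()*10 + 100)
	}

	fmt.Printf("p50=%.2f p99=%.2f n=%d\n", s.Query(0.50), s.Query(0.99), s.Count())
}
```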
-func (s *Stream) Insert(v float64) { - s.insert(Sample{Value: v, Width: 1}) -} - -func (s *Stream) insert(sample Sample) { - s.b = append(s.b, sample) - s.sorted = false - if len(s.b) == cap(s.b) { - s.flush() - } -} - -// Query returns the computed qth percentiles value. If s was created with -// NewTargeted, and q is not in the set of quantiles provided a priori, Query -// will return an unspecified result. -func (s *Stream) Query(q float64) float64 { - if !s.flushed() { - // Fast path when there hasn't been enough data for a flush; - // this also yields better accuracy for small sets of data. - l := len(s.b) - if l == 0 { - return 0 - } - i := int(math.Ceil(float64(l) * q)) - if i > 0 { - i -= 1 - } - s.maybeSort() - return s.b[i].Value - } - s.flush() - return s.stream.query(q) -} - -// Merge merges samples into the underlying streams samples. This is handy when -// merging multiple streams from separate threads, database shards, etc. -// -// ATTENTION: This method is broken and does not yield correct results. The -// underlying algorithm is not capable of merging streams correctly. -func (s *Stream) Merge(samples Samples) { - sort.Sort(samples) - s.stream.merge(samples) -} - -// Reset reinitializes and clears the list reusing the samples buffer memory. -func (s *Stream) Reset() { - s.stream.reset() - s.b = s.b[:0] -} - -// Samples returns stream samples held by s. -func (s *Stream) Samples() Samples { - if !s.flushed() { - return s.b - } - s.flush() - return s.stream.samples() -} - -// Count returns the total number of samples observed in the stream -// since initialization. -func (s *Stream) Count() int { - return len(s.b) + s.stream.count() -} - -func (s *Stream) flush() { - s.maybeSort() - s.stream.merge(s.b) - s.b = s.b[:0] -} - -func (s *Stream) maybeSort() { - if !s.sorted { - s.sorted = true - sort.Sort(s.b) - } -} - -func (s *Stream) flushed() bool { - return len(s.stream.l) > 0 -} - -type stream struct { - n float64 - l []Sample - ƒ invariant -} - -func (s *stream) reset() { - s.l = s.l[:0] - s.n = 0 -} - -func (s *stream) insert(v float64) { - s.merge(Samples{{v, 1, 0}}) -} - -func (s *stream) merge(samples Samples) { - // TODO(beorn7): This tries to merge not only individual samples, but - // whole summaries. The paper doesn't mention merging summaries at - // all. Unittests show that the merging is inaccurate. Find out how to - // do merges properly. - var r float64 - i := 0 - for _, sample := range samples { - for ; i < len(s.l); i++ { - c := s.l[i] - if c.Value > sample.Value { - // Insert at position i. - s.l = append(s.l, Sample{}) - copy(s.l[i+1:], s.l[i:]) - s.l[i] = Sample{ - sample.Value, - sample.Width, - math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1), - // TODO(beorn7): How to calculate delta correctly? 
- } - i++ - goto inserted - } - r += c.Width - } - s.l = append(s.l, Sample{sample.Value, sample.Width, 0}) - i++ - inserted: - s.n += sample.Width - r += sample.Width - } - s.compress() -} - -func (s *stream) count() int { - return int(s.n) -} - -func (s *stream) query(q float64) float64 { - t := math.Ceil(q * s.n) - t += math.Ceil(s.ƒ(s, t) / 2) - p := s.l[0] - var r float64 - for _, c := range s.l[1:] { - r += p.Width - if r+c.Width+c.Delta > t { - return p.Value - } - p = c - } - return p.Value -} - -func (s *stream) compress() { - if len(s.l) < 2 { - return - } - x := s.l[len(s.l)-1] - xi := len(s.l) - 1 - r := s.n - 1 - x.Width - - for i := len(s.l) - 2; i >= 0; i-- { - c := s.l[i] - if c.Width+x.Width+x.Delta <= s.ƒ(s, r) { - x.Width += c.Width - s.l[xi] = x - // Remove element at i. - copy(s.l[i:], s.l[i+1:]) - s.l = s.l[:len(s.l)-1] - xi -= 1 - } else { - x = c - xi = i - } - r -= c.Width - } -} - -func (s *stream) samples() Samples { - samples := make(Samples, len(s.l)) - copy(samples, s.l) - return samples -} diff --git a/vendor/github.com/bradfitz/gomemcache/LICENSE b/vendor/github.com/bradfitz/gomemcache/LICENSE deleted file mode 100644 index d645695..0000000 --- a/vendor/github.com/bradfitz/gomemcache/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/bradfitz/gomemcache/memcache/memcache.go b/vendor/github.com/bradfitz/gomemcache/memcache/memcache.go deleted file mode 100644 index cc45763..0000000 --- a/vendor/github.com/bradfitz/gomemcache/memcache/memcache.go +++ /dev/null @@ -1,733 +0,0 @@ -/* -Copyright 2011 Google Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package memcache provides a client for the memcached cache server. 
-package memcache - -import ( - "bufio" - "bytes" - "errors" - "fmt" - "io" - "net" - - "strconv" - "strings" - "sync" - "time" -) - -// Similar to: -// https://godoc.org/google.golang.org/appengine/memcache - -var ( - // ErrCacheMiss means that a Get failed because the item wasn't present. - ErrCacheMiss = errors.New("memcache: cache miss") - - // ErrCASConflict means that a CompareAndSwap call failed due to the - // cached value being modified between the Get and the CompareAndSwap. - // If the cached value was simply evicted rather than replaced, - // ErrNotStored will be returned instead. - ErrCASConflict = errors.New("memcache: compare-and-swap conflict") - - // ErrNotStored means that a conditional write operation (i.e. Add or - // CompareAndSwap) failed because the condition was not satisfied. - ErrNotStored = errors.New("memcache: item not stored") - - // ErrServer means that a server error occurred. - ErrServerError = errors.New("memcache: server error") - - // ErrNoStats means that no statistics were available. - ErrNoStats = errors.New("memcache: no statistics available") - - // ErrMalformedKey is returned when an invalid key is used. - // Keys must be at maximum 250 bytes long and not - // contain whitespace or control characters. - ErrMalformedKey = errors.New("malformed: key is too long or contains invalid characters") - - // ErrNoServers is returned when no servers are configured or available. - ErrNoServers = errors.New("memcache: no servers configured or available") -) - -const ( - // DefaultTimeout is the default socket read/write timeout. - DefaultTimeout = 100 * time.Millisecond - - // DefaultMaxIdleConns is the default maximum number of idle connections - // kept for any single address. - DefaultMaxIdleConns = 2 -) - -const buffered = 8 // arbitrary buffered channel size, for readability - -// resumableError returns true if err is only a protocol-level cache error. -// This is used to determine whether or not a server connection should -// be re-used or not. If an error occurs, by default we don't reuse the -// connection, unless it was just a cache error. -func resumableError(err error) bool { - switch err { - case ErrCacheMiss, ErrCASConflict, ErrNotStored, ErrMalformedKey: - return true - } - return false -} - -func legalKey(key string) bool { - if len(key) > 250 { - return false - } - for i := 0; i < len(key); i++ { - if key[i] <= ' ' || key[i] == 0x7f { - return false - } - } - return true -} - -var ( - crlf = []byte("\r\n") - space = []byte(" ") - resultOK = []byte("OK\r\n") - resultStored = []byte("STORED\r\n") - resultNotStored = []byte("NOT_STORED\r\n") - resultExists = []byte("EXISTS\r\n") - resultNotFound = []byte("NOT_FOUND\r\n") - resultDeleted = []byte("DELETED\r\n") - resultEnd = []byte("END\r\n") - resultOk = []byte("OK\r\n") - resultTouched = []byte("TOUCHED\r\n") - - resultClientErrorPrefix = []byte("CLIENT_ERROR ") - versionPrefix = []byte("VERSION") -) - -// New returns a memcache client using the provided server(s) -// with equal weight. If a server is listed multiple times, -// it gets a proportional amount of weight. -func New(server ...string) *Client { - ss := new(ServerList) - ss.SetServers(server...) - return NewFromSelector(ss) -} - -// NewFromSelector returns a new Client using the provided ServerSelector. -func NewFromSelector(ss ServerSelector) *Client { - return &Client{selector: ss} -} - -// Client is a memcache client. -// It is safe for unlocked use by multiple concurrent goroutines. 
-type Client struct { - // Timeout specifies the socket read/write timeout. - // If zero, DefaultTimeout is used. - Timeout time.Duration - - // MaxIdleConns specifies the maximum number of idle connections that will - // be maintained per address. If less than one, DefaultMaxIdleConns will be - // used. - // - // Consider your expected traffic rates and latency carefully. This should - // be set to a number higher than your peak parallel requests. - MaxIdleConns int - - selector ServerSelector - - lk sync.Mutex - freeconn map[string][]*conn -} - -// Item is an item to be got or stored in a memcached server. -type Item struct { - // Key is the Item's key (250 bytes maximum). - Key string - - // Value is the Item's value. - Value []byte - - // Flags are server-opaque flags whose semantics are entirely - // up to the app. - Flags uint32 - - // Expiration is the cache expiration time, in seconds: either a relative - // time from now (up to 1 month), or an absolute Unix epoch time. - // Zero means the Item has no expiration time. - Expiration int32 - - // Compare and swap ID. - casid uint64 -} - -// conn is a connection to a server. -type conn struct { - nc net.Conn - rw *bufio.ReadWriter - addr net.Addr - c *Client -} - -// release returns this connection back to the client's free pool -func (cn *conn) release() { - cn.c.putFreeConn(cn.addr, cn) -} - -func (cn *conn) extendDeadline() { - cn.nc.SetDeadline(time.Now().Add(cn.c.netTimeout())) -} - -// condRelease releases this connection if the error pointed to by err -// is nil (not an error) or is only a protocol level error (e.g. a -// cache miss). The purpose is to not recycle TCP connections that -// are bad. -func (cn *conn) condRelease(err *error) { - if *err == nil || resumableError(*err) { - cn.release() - } else { - cn.nc.Close() - } -} - -func (c *Client) putFreeConn(addr net.Addr, cn *conn) { - c.lk.Lock() - defer c.lk.Unlock() - if c.freeconn == nil { - c.freeconn = make(map[string][]*conn) - } - freelist := c.freeconn[addr.String()] - if len(freelist) >= c.maxIdleConns() { - cn.nc.Close() - return - } - c.freeconn[addr.String()] = append(freelist, cn) -} - -func (c *Client) getFreeConn(addr net.Addr) (cn *conn, ok bool) { - c.lk.Lock() - defer c.lk.Unlock() - if c.freeconn == nil { - return nil, false - } - freelist, ok := c.freeconn[addr.String()] - if !ok || len(freelist) == 0 { - return nil, false - } - cn = freelist[len(freelist)-1] - c.freeconn[addr.String()] = freelist[:len(freelist)-1] - return cn, true -} - -func (c *Client) netTimeout() time.Duration { - if c.Timeout != 0 { - return c.Timeout - } - return DefaultTimeout -} - -func (c *Client) maxIdleConns() int { - if c.MaxIdleConns > 0 { - return c.MaxIdleConns - } - return DefaultMaxIdleConns -} - -// ConnectTimeoutError is the error type used when it takes -// too long to connect to the desired host. This level of -// detail can generally be ignored. 
-type ConnectTimeoutError struct { - Addr net.Addr -} - -func (cte *ConnectTimeoutError) Error() string { - return "memcache: connect timeout to " + cte.Addr.String() -} - -func (c *Client) dial(addr net.Addr) (net.Conn, error) { - nc, err := net.DialTimeout(addr.Network(), addr.String(), c.netTimeout()) - if err == nil { - return nc, nil - } - - if ne, ok := err.(net.Error); ok && ne.Timeout() { - return nil, &ConnectTimeoutError{addr} - } - - return nil, err -} - -func (c *Client) getConn(addr net.Addr) (*conn, error) { - cn, ok := c.getFreeConn(addr) - if ok { - cn.extendDeadline() - return cn, nil - } - nc, err := c.dial(addr) - if err != nil { - return nil, err - } - cn = &conn{ - nc: nc, - addr: addr, - rw: bufio.NewReadWriter(bufio.NewReader(nc), bufio.NewWriter(nc)), - c: c, - } - cn.extendDeadline() - return cn, nil -} - -func (c *Client) onItem(item *Item, fn func(*Client, *bufio.ReadWriter, *Item) error) error { - addr, err := c.selector.PickServer(item.Key) - if err != nil { - return err - } - cn, err := c.getConn(addr) - if err != nil { - return err - } - defer cn.condRelease(&err) - if err = fn(c, cn.rw, item); err != nil { - return err - } - return nil -} - -func (c *Client) FlushAll() error { - return c.selector.Each(c.flushAllFromAddr) -} - -// Get gets the item for the given key. ErrCacheMiss is returned for a -// memcache cache miss. The key must be at most 250 bytes in length. -func (c *Client) Get(key string) (item *Item, err error) { - err = c.withKeyAddr(key, func(addr net.Addr) error { - return c.getFromAddr(addr, []string{key}, func(it *Item) { item = it }) - }) - if err == nil && item == nil { - err = ErrCacheMiss - } - return -} - -// Touch updates the expiry for the given key. The seconds parameter is either -// a Unix timestamp or, if seconds is less than 1 month, the number of seconds -// into the future at which time the item will expire. Zero means the item has -// no expiration time. ErrCacheMiss is returned if the key is not in the cache. -// The key must be at most 250 bytes in length. 
-func (c *Client) Touch(key string, seconds int32) (err error) { - return c.withKeyAddr(key, func(addr net.Addr) error { - return c.touchFromAddr(addr, []string{key}, seconds) - }) -} - -func (c *Client) withKeyAddr(key string, fn func(net.Addr) error) (err error) { - if !legalKey(key) { - return ErrMalformedKey - } - addr, err := c.selector.PickServer(key) - if err != nil { - return err - } - return fn(addr) -} - -func (c *Client) withAddrRw(addr net.Addr, fn func(*bufio.ReadWriter) error) (err error) { - cn, err := c.getConn(addr) - if err != nil { - return err - } - defer cn.condRelease(&err) - return fn(cn.rw) -} - -func (c *Client) withKeyRw(key string, fn func(*bufio.ReadWriter) error) error { - return c.withKeyAddr(key, func(addr net.Addr) error { - return c.withAddrRw(addr, fn) - }) -} - -func (c *Client) getFromAddr(addr net.Addr, keys []string, cb func(*Item)) error { - return c.withAddrRw(addr, func(rw *bufio.ReadWriter) error { - if _, err := fmt.Fprintf(rw, "gets %s\r\n", strings.Join(keys, " ")); err != nil { - return err - } - if err := rw.Flush(); err != nil { - return err - } - if err := parseGetResponse(rw.Reader, cb); err != nil { - return err - } - return nil - }) -} - -// flushAllFromAddr send the flush_all command to the given addr -func (c *Client) flushAllFromAddr(addr net.Addr) error { - return c.withAddrRw(addr, func(rw *bufio.ReadWriter) error { - if _, err := fmt.Fprintf(rw, "flush_all\r\n"); err != nil { - return err - } - if err := rw.Flush(); err != nil { - return err - } - line, err := rw.ReadSlice('\n') - if err != nil { - return err - } - switch { - case bytes.Equal(line, resultOk): - break - default: - return fmt.Errorf("memcache: unexpected response line from flush_all: %q", string(line)) - } - return nil - }) -} - -// ping sends the version command to the given addr -func (c *Client) ping(addr net.Addr) error { - return c.withAddrRw(addr, func(rw *bufio.ReadWriter) error { - if _, err := fmt.Fprintf(rw, "version\r\n"); err != nil { - return err - } - if err := rw.Flush(); err != nil { - return err - } - line, err := rw.ReadSlice('\n') - if err != nil { - return err - } - - switch { - case bytes.HasPrefix(line, versionPrefix): - break - default: - return fmt.Errorf("memcache: unexpected response line from ping: %q", string(line)) - } - return nil - }) -} - -func (c *Client) touchFromAddr(addr net.Addr, keys []string, expiration int32) error { - return c.withAddrRw(addr, func(rw *bufio.ReadWriter) error { - for _, key := range keys { - if _, err := fmt.Fprintf(rw, "touch %s %d\r\n", key, expiration); err != nil { - return err - } - if err := rw.Flush(); err != nil { - return err - } - line, err := rw.ReadSlice('\n') - if err != nil { - return err - } - switch { - case bytes.Equal(line, resultTouched): - break - case bytes.Equal(line, resultNotFound): - return ErrCacheMiss - default: - return fmt.Errorf("memcache: unexpected response line from touch: %q", string(line)) - } - } - return nil - }) -} - -// GetMulti is a batch version of Get. The returned map from keys to -// items may have fewer elements than the input slice, due to memcache -// cache misses. Each key must be at most 250 bytes in length. -// If no error is returned, the returned map will also be non-nil. 
-func (c *Client) GetMulti(keys []string) (map[string]*Item, error) { - var lk sync.Mutex - m := make(map[string]*Item) - addItemToMap := func(it *Item) { - lk.Lock() - defer lk.Unlock() - m[it.Key] = it - } - - keyMap := make(map[net.Addr][]string) - for _, key := range keys { - if !legalKey(key) { - return nil, ErrMalformedKey - } - addr, err := c.selector.PickServer(key) - if err != nil { - return nil, err - } - keyMap[addr] = append(keyMap[addr], key) - } - - ch := make(chan error, buffered) - for addr, keys := range keyMap { - go func(addr net.Addr, keys []string) { - ch <- c.getFromAddr(addr, keys, addItemToMap) - }(addr, keys) - } - - var err error - for _ = range keyMap { - if ge := <-ch; ge != nil { - err = ge - } - } - return m, err -} - -// parseGetResponse reads a GET response from r and calls cb for each -// read and allocated Item -func parseGetResponse(r *bufio.Reader, cb func(*Item)) error { - for { - line, err := r.ReadSlice('\n') - if err != nil { - return err - } - if bytes.Equal(line, resultEnd) { - return nil - } - it := new(Item) - size, err := scanGetResponseLine(line, it) - if err != nil { - return err - } - it.Value = make([]byte, size+2) - _, err = io.ReadFull(r, it.Value) - if err != nil { - it.Value = nil - return err - } - if !bytes.HasSuffix(it.Value, crlf) { - it.Value = nil - return fmt.Errorf("memcache: corrupt get result read") - } - it.Value = it.Value[:size] - cb(it) - } -} - -// scanGetResponseLine populates it and returns the declared size of the item. -// It does not read the bytes of the item. -func scanGetResponseLine(line []byte, it *Item) (size int, err error) { - pattern := "VALUE %s %d %d %d\r\n" - dest := []interface{}{&it.Key, &it.Flags, &size, &it.casid} - if bytes.Count(line, space) == 3 { - pattern = "VALUE %s %d %d\r\n" - dest = dest[:3] - } - n, err := fmt.Sscanf(string(line), pattern, dest...) - if err != nil || n != len(dest) { - return -1, fmt.Errorf("memcache: unexpected line in get response: %q", line) - } - return size, nil -} - -// Set writes the given item, unconditionally. -func (c *Client) Set(item *Item) error { - return c.onItem(item, (*Client).set) -} - -func (c *Client) set(rw *bufio.ReadWriter, item *Item) error { - return c.populateOne(rw, "set", item) -} - -// Add writes the given item, if no value already exists for its -// key. ErrNotStored is returned if that condition is not met. -func (c *Client) Add(item *Item) error { - return c.onItem(item, (*Client).add) -} - -func (c *Client) add(rw *bufio.ReadWriter, item *Item) error { - return c.populateOne(rw, "add", item) -} - -// Replace writes the given item, but only if the server *does* -// already hold data for this key -func (c *Client) Replace(item *Item) error { - return c.onItem(item, (*Client).replace) -} - -func (c *Client) replace(rw *bufio.ReadWriter, item *Item) error { - return c.populateOne(rw, "replace", item) -} - -// Append appends the given item to the existing item, if a value already -// exists for its key. ErrNotStored is returned if that condition is not met. -func (c *Client) Append(item *Item) error { - return c.onItem(item, (*Client).append) -} - -func (c *Client) append(rw *bufio.ReadWriter, item *Item) error { - return c.populateOne(rw, "append", item) -} - -// Prepend prepends the given item to the existing item, if a value already -// exists for its key. ErrNotStored is returned if that condition is not met. 
-func (c *Client) Prepend(item *Item) error { - return c.onItem(item, (*Client).prepend) -} - -func (c *Client) prepend(rw *bufio.ReadWriter, item *Item) error { - return c.populateOne(rw, "prepend", item) -} - -// CompareAndSwap writes the given item that was previously returned -// by Get, if the value was neither modified or evicted between the -// Get and the CompareAndSwap calls. The item's Key should not change -// between calls but all other item fields may differ. ErrCASConflict -// is returned if the value was modified in between the -// calls. ErrNotStored is returned if the value was evicted in between -// the calls. -func (c *Client) CompareAndSwap(item *Item) error { - return c.onItem(item, (*Client).cas) -} - -func (c *Client) cas(rw *bufio.ReadWriter, item *Item) error { - return c.populateOne(rw, "cas", item) -} - -func (c *Client) populateOne(rw *bufio.ReadWriter, verb string, item *Item) error { - if !legalKey(item.Key) { - return ErrMalformedKey - } - var err error - if verb == "cas" { - _, err = fmt.Fprintf(rw, "%s %s %d %d %d %d\r\n", - verb, item.Key, item.Flags, item.Expiration, len(item.Value), item.casid) - } else { - _, err = fmt.Fprintf(rw, "%s %s %d %d %d\r\n", - verb, item.Key, item.Flags, item.Expiration, len(item.Value)) - } - if err != nil { - return err - } - if _, err = rw.Write(item.Value); err != nil { - return err - } - if _, err := rw.Write(crlf); err != nil { - return err - } - if err := rw.Flush(); err != nil { - return err - } - line, err := rw.ReadSlice('\n') - if err != nil { - return err - } - switch { - case bytes.Equal(line, resultStored): - return nil - case bytes.Equal(line, resultNotStored): - return ErrNotStored - case bytes.Equal(line, resultExists): - return ErrCASConflict - case bytes.Equal(line, resultNotFound): - return ErrCacheMiss - } - return fmt.Errorf("memcache: unexpected response line from %q: %q", verb, string(line)) -} - -func writeReadLine(rw *bufio.ReadWriter, format string, args ...interface{}) ([]byte, error) { - _, err := fmt.Fprintf(rw, format, args...) - if err != nil { - return nil, err - } - if err := rw.Flush(); err != nil { - return nil, err - } - line, err := rw.ReadSlice('\n') - return line, err -} - -func writeExpectf(rw *bufio.ReadWriter, expect []byte, format string, args ...interface{}) error { - line, err := writeReadLine(rw, format, args...) - if err != nil { - return err - } - switch { - case bytes.Equal(line, resultOK): - return nil - case bytes.Equal(line, expect): - return nil - case bytes.Equal(line, resultNotStored): - return ErrNotStored - case bytes.Equal(line, resultExists): - return ErrCASConflict - case bytes.Equal(line, resultNotFound): - return ErrCacheMiss - } - return fmt.Errorf("memcache: unexpected response line: %q", string(line)) -} - -// Delete deletes the item with the provided key. The error ErrCacheMiss is -// returned if the item didn't already exist in the cache. -func (c *Client) Delete(key string) error { - return c.withKeyRw(key, func(rw *bufio.ReadWriter) error { - return writeExpectf(rw, resultDeleted, "delete %s\r\n", key) - }) -} - -// DeleteAll deletes all items in the cache. -func (c *Client) DeleteAll() error { - return c.withKeyRw("", func(rw *bufio.ReadWriter) error { - return writeExpectf(rw, resultDeleted, "flush_all\r\n") - }) -} - -// Ping checks all instances if they are alive. Returns error if any -// of them is down. -func (c *Client) Ping() error { - return c.selector.Each(c.ping) -} - -// Increment atomically increments key by delta. 
The return value is -// the new value after being incremented or an error. If the value -// didn't exist in memcached the error is ErrCacheMiss. The value in -// memcached must be an decimal number, or an error will be returned. -// On 64-bit overflow, the new value wraps around. -func (c *Client) Increment(key string, delta uint64) (newValue uint64, err error) { - return c.incrDecr("incr", key, delta) -} - -// Decrement atomically decrements key by delta. The return value is -// the new value after being decremented or an error. If the value -// didn't exist in memcached the error is ErrCacheMiss. The value in -// memcached must be an decimal number, or an error will be returned. -// On underflow, the new value is capped at zero and does not wrap -// around. -func (c *Client) Decrement(key string, delta uint64) (newValue uint64, err error) { - return c.incrDecr("decr", key, delta) -} - -func (c *Client) incrDecr(verb, key string, delta uint64) (uint64, error) { - var val uint64 - err := c.withKeyRw(key, func(rw *bufio.ReadWriter) error { - line, err := writeReadLine(rw, "%s %s %d\r\n", verb, key, delta) - if err != nil { - return err - } - switch { - case bytes.Equal(line, resultNotFound): - return ErrCacheMiss - case bytes.HasPrefix(line, resultClientErrorPrefix): - errMsg := line[len(resultClientErrorPrefix) : len(line)-2] - return errors.New("memcache: client error: " + string(errMsg)) - } - val, err = strconv.ParseUint(string(line[:len(line)-2]), 10, 64) - if err != nil { - return err - } - return nil - }) - return val, err -} diff --git a/vendor/github.com/bradfitz/gomemcache/memcache/selector.go b/vendor/github.com/bradfitz/gomemcache/memcache/selector.go deleted file mode 100644 index 89ad81e..0000000 --- a/vendor/github.com/bradfitz/gomemcache/memcache/selector.go +++ /dev/null @@ -1,129 +0,0 @@ -/* -Copyright 2011 Google Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package memcache - -import ( - "hash/crc32" - "net" - "strings" - "sync" -) - -// ServerSelector is the interface that selects a memcache server -// as a function of the item's key. -// -// All ServerSelector implementations must be safe for concurrent use -// by multiple goroutines. -type ServerSelector interface { - // PickServer returns the server address that a given item - // should be shared onto. - PickServer(key string) (net.Addr, error) - Each(func(net.Addr) error) error -} - -// ServerList is a simple ServerSelector. Its zero value is usable. -type ServerList struct { - mu sync.RWMutex - addrs []net.Addr -} - -// staticAddr caches the Network() and String() values from any net.Addr. -type staticAddr struct { - ntw, str string -} - -func newStaticAddr(a net.Addr) net.Addr { - return &staticAddr{ - ntw: a.Network(), - str: a.String(), - } -} - -func (s *staticAddr) Network() string { return s.ntw } -func (s *staticAddr) String() string { return s.str } - -// SetServers changes a ServerList's set of servers at runtime and is -// safe for concurrent use by multiple goroutines. -// -// Each server is given equal weight. 
A server is given more weight -// if it's listed multiple times. -// -// SetServers returns an error if any of the server names fail to -// resolve. No attempt is made to connect to the server. If any error -// is returned, no changes are made to the ServerList. -func (ss *ServerList) SetServers(servers ...string) error { - naddr := make([]net.Addr, len(servers)) - for i, server := range servers { - if strings.Contains(server, "/") { - addr, err := net.ResolveUnixAddr("unix", server) - if err != nil { - return err - } - naddr[i] = newStaticAddr(addr) - } else { - tcpaddr, err := net.ResolveTCPAddr("tcp", server) - if err != nil { - return err - } - naddr[i] = newStaticAddr(tcpaddr) - } - } - - ss.mu.Lock() - defer ss.mu.Unlock() - ss.addrs = naddr - return nil -} - -// Each iterates over each server calling the given function -func (ss *ServerList) Each(f func(net.Addr) error) error { - ss.mu.RLock() - defer ss.mu.RUnlock() - for _, a := range ss.addrs { - if err := f(a); nil != err { - return err - } - } - return nil -} - -// keyBufPool returns []byte buffers for use by PickServer's call to -// crc32.ChecksumIEEE to avoid allocations. (but doesn't avoid the -// copies, which at least are bounded in size and small) -var keyBufPool = sync.Pool{ - New: func() interface{} { - b := make([]byte, 256) - return &b - }, -} - -func (ss *ServerList) PickServer(key string) (net.Addr, error) { - ss.mu.RLock() - defer ss.mu.RUnlock() - if len(ss.addrs) == 0 { - return nil, ErrNoServers - } - if len(ss.addrs) == 1 { - return ss.addrs[0], nil - } - bufp := keyBufPool.Get().(*[]byte) - n := copy(*bufp, key) - cs := crc32.ChecksumIEEE((*bufp)[:n]) - keyBufPool.Put(bufp) - - return ss.addrs[cs%uint32(len(ss.addrs))], nil -} diff --git a/vendor/github.com/cenkalti/backoff/v4/.gitignore b/vendor/github.com/cenkalti/backoff/v4/.gitignore deleted file mode 100644 index 50d95c5..0000000 --- a/vendor/github.com/cenkalti/backoff/v4/.gitignore +++ /dev/null @@ -1,25 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe - -# IDEs -.idea/ diff --git a/vendor/github.com/cenkalti/backoff/v4/.travis.yml b/vendor/github.com/cenkalti/backoff/v4/.travis.yml deleted file mode 100644 index c79105c..0000000 --- a/vendor/github.com/cenkalti/backoff/v4/.travis.yml +++ /dev/null @@ -1,10 +0,0 @@ -language: go -go: - - 1.13 - - 1.x - - tip -before_install: - - go get github.com/mattn/goveralls - - go get golang.org/x/tools/cmd/cover -script: - - $HOME/gopath/bin/goveralls -service=travis-ci diff --git a/vendor/github.com/cenkalti/backoff/v4/LICENSE b/vendor/github.com/cenkalti/backoff/v4/LICENSE deleted file mode 100644 index 89b8179..0000000 --- a/vendor/github.com/cenkalti/backoff/v4/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Cenk Altı - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or 
substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/cenkalti/backoff/v4/README.md b/vendor/github.com/cenkalti/backoff/v4/README.md deleted file mode 100644 index 16abdfc..0000000 --- a/vendor/github.com/cenkalti/backoff/v4/README.md +++ /dev/null @@ -1,32 +0,0 @@ -# Exponential Backoff [![GoDoc][godoc image]][godoc] [![Build Status][travis image]][travis] [![Coverage Status][coveralls image]][coveralls] - -This is a Go port of the exponential backoff algorithm from [Google's HTTP Client Library for Java][google-http-java-client]. - -[Exponential backoff][exponential backoff wiki] -is an algorithm that uses feedback to multiplicatively decrease the rate of some process, -in order to gradually find an acceptable rate. -The retries exponentially increase and stop increasing when a certain threshold is met. - -## Usage - -Import path is `github.com/cenkalti/backoff/v4`. Please note the version part at the end. - -Use https://pkg.go.dev/github.com/cenkalti/backoff/v4 to view the documentation. - -## Contributing - -* I would like to keep this library as small as possible. -* Please don't send a PR without opening an issue and discussing it first. -* If proposed change is not a common use case, I will probably not accept it. - -[godoc]: https://pkg.go.dev/github.com/cenkalti/backoff/v4 -[godoc image]: https://godoc.org/github.com/cenkalti/backoff?status.png -[travis]: https://travis-ci.org/cenkalti/backoff -[travis image]: https://travis-ci.org/cenkalti/backoff.png?branch=master -[coveralls]: https://coveralls.io/github/cenkalti/backoff?branch=master -[coveralls image]: https://coveralls.io/repos/github/cenkalti/backoff/badge.svg?branch=master - -[google-http-java-client]: https://github.com/google/google-http-java-client/blob/da1aa993e90285ec18579f1553339b00e19b3ab5/google-http-client/src/main/java/com/google/api/client/util/ExponentialBackOff.java -[exponential backoff wiki]: http://en.wikipedia.org/wiki/Exponential_backoff - -[advanced example]: https://pkg.go.dev/github.com/cenkalti/backoff/v4?tab=doc#pkg-examples diff --git a/vendor/github.com/cenkalti/backoff/v4/backoff.go b/vendor/github.com/cenkalti/backoff/v4/backoff.go deleted file mode 100644 index 3676ee4..0000000 --- a/vendor/github.com/cenkalti/backoff/v4/backoff.go +++ /dev/null @@ -1,66 +0,0 @@ -// Package backoff implements backoff algorithms for retrying operations. -// -// Use Retry function for retrying operations that may fail. -// If Retry does not meet your needs, -// copy/paste the function into your project and modify as you wish. -// -// There is also Ticker type similar to time.Ticker. -// You can use it if you need to work with channels. -// -// See Examples section below for usage examples. -package backoff - -import "time" - -// BackOff is a backoff policy for retrying an operation. -type BackOff interface { - // NextBackOff returns the duration to wait before retrying the operation, - // or backoff. Stop to indicate that no more retries should be made. 
- // - // Example usage: - // - // duration := backoff.NextBackOff(); - // if (duration == backoff.Stop) { - // // Do not retry operation. - // } else { - // // Sleep for duration and retry operation. - // } - // - NextBackOff() time.Duration - - // Reset to initial state. - Reset() -} - -// Stop indicates that no more retries should be made for use in NextBackOff(). -const Stop time.Duration = -1 - -// ZeroBackOff is a fixed backoff policy whose backoff time is always zero, -// meaning that the operation is retried immediately without waiting, indefinitely. -type ZeroBackOff struct{} - -func (b *ZeroBackOff) Reset() {} - -func (b *ZeroBackOff) NextBackOff() time.Duration { return 0 } - -// StopBackOff is a fixed backoff policy that always returns backoff.Stop for -// NextBackOff(), meaning that the operation should never be retried. -type StopBackOff struct{} - -func (b *StopBackOff) Reset() {} - -func (b *StopBackOff) NextBackOff() time.Duration { return Stop } - -// ConstantBackOff is a backoff policy that always returns the same backoff delay. -// This is in contrast to an exponential backoff policy, -// which returns a delay that grows longer as you call NextBackOff() over and over again. -type ConstantBackOff struct { - Interval time.Duration -} - -func (b *ConstantBackOff) Reset() {} -func (b *ConstantBackOff) NextBackOff() time.Duration { return b.Interval } - -func NewConstantBackOff(d time.Duration) *ConstantBackOff { - return &ConstantBackOff{Interval: d} -} diff --git a/vendor/github.com/cenkalti/backoff/v4/context.go b/vendor/github.com/cenkalti/backoff/v4/context.go deleted file mode 100644 index 4848233..0000000 --- a/vendor/github.com/cenkalti/backoff/v4/context.go +++ /dev/null @@ -1,62 +0,0 @@ -package backoff - -import ( - "context" - "time" -) - -// BackOffContext is a backoff policy that stops retrying after the context -// is canceled. -type BackOffContext interface { // nolint: golint - BackOff - Context() context.Context -} - -type backOffContext struct { - BackOff - ctx context.Context -} - -// WithContext returns a BackOffContext with context ctx -// -// ctx must not be nil -func WithContext(b BackOff, ctx context.Context) BackOffContext { // nolint: golint - if ctx == nil { - panic("nil context") - } - - if b, ok := b.(*backOffContext); ok { - return &backOffContext{ - BackOff: b.BackOff, - ctx: ctx, - } - } - - return &backOffContext{ - BackOff: b, - ctx: ctx, - } -} - -func getContext(b BackOff) context.Context { - if cb, ok := b.(BackOffContext); ok { - return cb.Context() - } - if tb, ok := b.(*backOffTries); ok { - return getContext(tb.delegate) - } - return context.Background() -} - -func (b *backOffContext) Context() context.Context { - return b.ctx -} - -func (b *backOffContext) NextBackOff() time.Duration { - select { - case <-b.ctx.Done(): - return Stop - default: - return b.BackOff.NextBackOff() - } -} diff --git a/vendor/github.com/cenkalti/backoff/v4/exponential.go b/vendor/github.com/cenkalti/backoff/v4/exponential.go deleted file mode 100644 index 2c56c1e..0000000 --- a/vendor/github.com/cenkalti/backoff/v4/exponential.go +++ /dev/null @@ -1,161 +0,0 @@ -package backoff - -import ( - "math/rand" - "time" -) - -/* -ExponentialBackOff is a backoff implementation that increases the backoff -period for each retry attempt using a randomization function that grows exponentially. 
- -NextBackOff() is calculated using the following formula: - - randomized interval = - RetryInterval * (random value in range [1 - RandomizationFactor, 1 + RandomizationFactor]) - -In other words NextBackOff() will range between the randomization factor -percentage below and above the retry interval. - -For example, given the following parameters: - - RetryInterval = 2 - RandomizationFactor = 0.5 - Multiplier = 2 - -the actual backoff period used in the next retry attempt will range between 1 and 3 seconds, -multiplied by the exponential, that is, between 2 and 6 seconds. - -Note: MaxInterval caps the RetryInterval and not the randomized interval. - -If the time elapsed since an ExponentialBackOff instance is created goes past the -MaxElapsedTime, then the method NextBackOff() starts returning backoff.Stop. - -The elapsed time can be reset by calling Reset(). - -Example: Given the following default arguments, for 10 tries the sequence will be, -and assuming we go over the MaxElapsedTime on the 10th try: - - Request # RetryInterval (seconds) Randomized Interval (seconds) - - 1 0.5 [0.25, 0.75] - 2 0.75 [0.375, 1.125] - 3 1.125 [0.562, 1.687] - 4 1.687 [0.8435, 2.53] - 5 2.53 [1.265, 3.795] - 6 3.795 [1.897, 5.692] - 7 5.692 [2.846, 8.538] - 8 8.538 [4.269, 12.807] - 9 12.807 [6.403, 19.210] - 10 19.210 backoff.Stop - -Note: Implementation is not thread-safe. -*/ -type ExponentialBackOff struct { - InitialInterval time.Duration - RandomizationFactor float64 - Multiplier float64 - MaxInterval time.Duration - // After MaxElapsedTime the ExponentialBackOff returns Stop. - // It never stops if MaxElapsedTime == 0. - MaxElapsedTime time.Duration - Stop time.Duration - Clock Clock - - currentInterval time.Duration - startTime time.Time -} - -// Clock is an interface that returns current time for BackOff. -type Clock interface { - Now() time.Time -} - -// Default values for ExponentialBackOff. -const ( - DefaultInitialInterval = 500 * time.Millisecond - DefaultRandomizationFactor = 0.5 - DefaultMultiplier = 1.5 - DefaultMaxInterval = 60 * time.Second - DefaultMaxElapsedTime = 15 * time.Minute -) - -// NewExponentialBackOff creates an instance of ExponentialBackOff using default values. -func NewExponentialBackOff() *ExponentialBackOff { - b := &ExponentialBackOff{ - InitialInterval: DefaultInitialInterval, - RandomizationFactor: DefaultRandomizationFactor, - Multiplier: DefaultMultiplier, - MaxInterval: DefaultMaxInterval, - MaxElapsedTime: DefaultMaxElapsedTime, - Stop: Stop, - Clock: SystemClock, - } - b.Reset() - return b -} - -type systemClock struct{} - -func (t systemClock) Now() time.Time { - return time.Now() -} - -// SystemClock implements Clock interface that uses time.Now(). -var SystemClock = systemClock{} - -// Reset the interval back to the initial retry interval and restarts the timer. -// Reset must be called before using b. -func (b *ExponentialBackOff) Reset() { - b.currentInterval = b.InitialInterval - b.startTime = b.Clock.Now() -} - -// NextBackOff calculates the next backoff interval using the formula: -// Randomized interval = RetryInterval * (1 ± RandomizationFactor) -func (b *ExponentialBackOff) NextBackOff() time.Duration { - // Make sure we have not gone over the maximum elapsed time. 
- elapsed := b.GetElapsedTime() - next := getRandomValueFromInterval(b.RandomizationFactor, rand.Float64(), b.currentInterval) - b.incrementCurrentInterval() - if b.MaxElapsedTime != 0 && elapsed+next > b.MaxElapsedTime { - return b.Stop - } - return next -} - -// GetElapsedTime returns the elapsed time since an ExponentialBackOff instance -// is created and is reset when Reset() is called. -// -// The elapsed time is computed using time.Now().UnixNano(). It is -// safe to call even while the backoff policy is used by a running -// ticker. -func (b *ExponentialBackOff) GetElapsedTime() time.Duration { - return b.Clock.Now().Sub(b.startTime) -} - -// Increments the current interval by multiplying it with the multiplier. -func (b *ExponentialBackOff) incrementCurrentInterval() { - // Check for overflow, if overflow is detected set the current interval to the max interval. - if float64(b.currentInterval) >= float64(b.MaxInterval)/b.Multiplier { - b.currentInterval = b.MaxInterval - } else { - b.currentInterval = time.Duration(float64(b.currentInterval) * b.Multiplier) - } -} - -// Returns a random value from the following interval: -// [currentInterval - randomizationFactor * currentInterval, currentInterval + randomizationFactor * currentInterval]. -func getRandomValueFromInterval(randomizationFactor, random float64, currentInterval time.Duration) time.Duration { - if randomizationFactor == 0 { - return currentInterval // make sure no randomness is used when randomizationFactor is 0. - } - var delta = randomizationFactor * float64(currentInterval) - var minInterval = float64(currentInterval) - delta - var maxInterval = float64(currentInterval) + delta - - // Get a random value from the range [minInterval, maxInterval]. - // The formula used below has a +1 because if the minInterval is 1 and the maxInterval is 3 then - // we want a 33% chance for selecting either 1, 2 or 3. - return time.Duration(minInterval + (random * (maxInterval - minInterval + 1))) -} diff --git a/vendor/github.com/cenkalti/backoff/v4/retry.go b/vendor/github.com/cenkalti/backoff/v4/retry.go deleted file mode 100644 index 1ce2507..0000000 --- a/vendor/github.com/cenkalti/backoff/v4/retry.go +++ /dev/null @@ -1,112 +0,0 @@ -package backoff - -import ( - "errors" - "time" -) - -// An Operation is executing by Retry() or RetryNotify(). -// The operation will be retried using a backoff policy if it returns an error. -type Operation func() error - -// Notify is a notify-on-error function. It receives an operation error and -// backoff delay if the operation failed (with an error). -// -// NOTE that if the backoff policy stated to stop retrying, -// the notify function isn't called. -type Notify func(error, time.Duration) - -// Retry the operation o until it does not return error or BackOff stops. -// o is guaranteed to be run at least once. -// -// If o returns a *PermanentError, the operation is not retried, and the -// wrapped error is returned. -// -// Retry sleeps the goroutine for the duration returned by BackOff after a -// failed operation returns. -func Retry(o Operation, b BackOff) error { - return RetryNotify(o, b, nil) -} - -// RetryNotify calls notify function with the error and wait duration -// for each failed attempt before sleep. -func RetryNotify(operation Operation, b BackOff, notify Notify) error { - return RetryNotifyWithTimer(operation, b, notify, nil) -} - -// RetryNotifyWithTimer calls notify function with the error and wait duration using the given Timer -// for each failed attempt before sleep. 
-// A default timer that uses system timer is used when nil is passed. -func RetryNotifyWithTimer(operation Operation, b BackOff, notify Notify, t Timer) error { - var err error - var next time.Duration - if t == nil { - t = &defaultTimer{} - } - - defer func() { - t.Stop() - }() - - ctx := getContext(b) - - b.Reset() - for { - if err = operation(); err == nil { - return nil - } - - var permanent *PermanentError - if errors.As(err, &permanent) { - return permanent.Err - } - - if next = b.NextBackOff(); next == Stop { - if cerr := ctx.Err(); cerr != nil { - return cerr - } - - return err - } - - if notify != nil { - notify(err, next) - } - - t.Start(next) - - select { - case <-ctx.Done(): - return ctx.Err() - case <-t.C(): - } - } -} - -// PermanentError signals that the operation should not be retried. -type PermanentError struct { - Err error -} - -func (e *PermanentError) Error() string { - return e.Err.Error() -} - -func (e *PermanentError) Unwrap() error { - return e.Err -} - -func (e *PermanentError) Is(target error) bool { - _, ok := target.(*PermanentError) - return ok -} - -// Permanent wraps the given err in a *PermanentError. -func Permanent(err error) error { - if err == nil { - return nil - } - return &PermanentError{ - Err: err, - } -} diff --git a/vendor/github.com/cenkalti/backoff/v4/ticker.go b/vendor/github.com/cenkalti/backoff/v4/ticker.go deleted file mode 100644 index df9d68b..0000000 --- a/vendor/github.com/cenkalti/backoff/v4/ticker.go +++ /dev/null @@ -1,97 +0,0 @@ -package backoff - -import ( - "context" - "sync" - "time" -) - -// Ticker holds a channel that delivers `ticks' of a clock at times reported by a BackOff. -// -// Ticks will continue to arrive when the previous operation is still running, -// so operations that take a while to fail could run in quick succession. -type Ticker struct { - C <-chan time.Time - c chan time.Time - b BackOff - ctx context.Context - timer Timer - stop chan struct{} - stopOnce sync.Once -} - -// NewTicker returns a new Ticker containing a channel that will send -// the time at times specified by the BackOff argument. Ticker is -// guaranteed to tick at least once. The channel is closed when Stop -// method is called or BackOff stops. It is not safe to manipulate the -// provided backoff policy (notably calling NextBackOff or Reset) -// while the ticker is running. -func NewTicker(b BackOff) *Ticker { - return NewTickerWithTimer(b, &defaultTimer{}) -} - -// NewTickerWithTimer returns a new Ticker with a custom timer. -// A default timer that uses system timer is used when nil is passed. -func NewTickerWithTimer(b BackOff, timer Timer) *Ticker { - if timer == nil { - timer = &defaultTimer{} - } - c := make(chan time.Time) - t := &Ticker{ - C: c, - c: c, - b: b, - ctx: getContext(b), - timer: timer, - stop: make(chan struct{}), - } - t.b.Reset() - go t.run() - return t -} - -// Stop turns off a ticker. After Stop, no more ticks will be sent. -func (t *Ticker) Stop() { - t.stopOnce.Do(func() { close(t.stop) }) -} - -func (t *Ticker) run() { - c := t.c - defer close(c) - - // Ticker is guaranteed to tick at least once. - afterC := t.send(time.Now()) - - for { - if afterC == nil { - return - } - - select { - case tick := <-afterC: - afterC = t.send(tick) - case <-t.stop: - t.c = nil // Prevent future ticks from being sent to the channel. 
- return - case <-t.ctx.Done(): - return - } - } -} - -func (t *Ticker) send(tick time.Time) <-chan time.Time { - select { - case t.c <- tick: - case <-t.stop: - return nil - } - - next := t.b.NextBackOff() - if next == Stop { - t.Stop() - return nil - } - - t.timer.Start(next) - return t.timer.C() -} diff --git a/vendor/github.com/cenkalti/backoff/v4/timer.go b/vendor/github.com/cenkalti/backoff/v4/timer.go deleted file mode 100644 index 8120d02..0000000 --- a/vendor/github.com/cenkalti/backoff/v4/timer.go +++ /dev/null @@ -1,35 +0,0 @@ -package backoff - -import "time" - -type Timer interface { - Start(duration time.Duration) - Stop() - C() <-chan time.Time -} - -// defaultTimer implements Timer interface using time.Timer -type defaultTimer struct { - timer *time.Timer -} - -// C returns the timers channel which receives the current time when the timer fires. -func (t *defaultTimer) C() <-chan time.Time { - return t.timer.C -} - -// Start starts the timer to fire after the given duration -func (t *defaultTimer) Start(duration time.Duration) { - if t.timer == nil { - t.timer = time.NewTimer(duration) - } else { - t.timer.Reset(duration) - } -} - -// Stop is called when the timer is not used anymore and resources may be freed. -func (t *defaultTimer) Stop() { - if t.timer != nil { - t.timer.Stop() - } -} diff --git a/vendor/github.com/cenkalti/backoff/v4/tries.go b/vendor/github.com/cenkalti/backoff/v4/tries.go deleted file mode 100644 index 28d58ca..0000000 --- a/vendor/github.com/cenkalti/backoff/v4/tries.go +++ /dev/null @@ -1,38 +0,0 @@ -package backoff - -import "time" - -/* -WithMaxRetries creates a wrapper around another BackOff, which will -return Stop if NextBackOff() has been called too many times since -the last time Reset() was called - -Note: Implementation is not thread-safe. -*/ -func WithMaxRetries(b BackOff, max uint64) BackOff { - return &backOffTries{delegate: b, maxTries: max} -} - -type backOffTries struct { - delegate BackOff - maxTries uint64 - numTries uint64 -} - -func (b *backOffTries) NextBackOff() time.Duration { - if b.maxTries == 0 { - return Stop - } - if b.maxTries > 0 { - if b.maxTries <= b.numTries { - return Stop - } - b.numTries++ - } - return b.delegate.NextBackOff() -} - -func (b *backOffTries) Reset() { - b.numTries = 0 - b.delegate.Reset() -} diff --git a/vendor/github.com/cespare/xxhash/v2/LICENSE.txt b/vendor/github.com/cespare/xxhash/v2/LICENSE.txt deleted file mode 100644 index 24b5306..0000000 --- a/vendor/github.com/cespare/xxhash/v2/LICENSE.txt +++ /dev/null @@ -1,22 +0,0 @@ -Copyright (c) 2016 Caleb Spare - -MIT License - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/cespare/xxhash/v2/README.md b/vendor/github.com/cespare/xxhash/v2/README.md deleted file mode 100644 index 792b4a6..0000000 --- a/vendor/github.com/cespare/xxhash/v2/README.md +++ /dev/null @@ -1,69 +0,0 @@ -# xxhash - -[![Go Reference](https://pkg.go.dev/badge/github.com/cespare/xxhash/v2.svg)](https://pkg.go.dev/github.com/cespare/xxhash/v2) -[![Test](https://github.com/cespare/xxhash/actions/workflows/test.yml/badge.svg)](https://github.com/cespare/xxhash/actions/workflows/test.yml) - -xxhash is a Go implementation of the 64-bit -[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a -high-quality hashing algorithm that is much faster than anything in the Go -standard library. - -This package provides a straightforward API: - -``` -func Sum64(b []byte) uint64 -func Sum64String(s string) uint64 -type Digest struct{ ... } - func New() *Digest -``` - -The `Digest` type implements hash.Hash64. Its key methods are: - -``` -func (*Digest) Write([]byte) (int, error) -func (*Digest) WriteString(string) (int, error) -func (*Digest) Sum64() uint64 -``` - -This implementation provides a fast pure-Go implementation and an even faster -assembly implementation for amd64. - -## Compatibility - -This package is in a module and the latest code is in version 2 of the module. -You need a version of Go with at least "minimal module compatibility" to use -github.com/cespare/xxhash/v2: - -* 1.9.7+ for Go 1.9 -* 1.10.3+ for Go 1.10 -* Go 1.11 or later - -I recommend using the latest release of Go. - -## Benchmarks - -Here are some quick benchmarks comparing the pure-Go and assembly -implementations of Sum64. - -| input size | purego | asm | -| --- | --- | --- | -| 5 B | 979.66 MB/s | 1291.17 MB/s | -| 100 B | 7475.26 MB/s | 7973.40 MB/s | -| 4 KB | 17573.46 MB/s | 17602.65 MB/s | -| 10 MB | 17131.46 MB/s | 17142.16 MB/s | - -These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using -the following commands under Go 1.11.2: - -``` -$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes' -$ go test -benchtime 10s -bench '/xxhash,direct,bytes' -``` - -## Projects using this package - -- [InfluxDB](https://github.com/influxdata/influxdb) -- [Prometheus](https://github.com/prometheus/prometheus) -- [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics) -- [FreeCache](https://github.com/coocood/freecache) -- [FastCache](https://github.com/VictoriaMetrics/fastcache) diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash.go b/vendor/github.com/cespare/xxhash/v2/xxhash.go deleted file mode 100644 index 15c835d..0000000 --- a/vendor/github.com/cespare/xxhash/v2/xxhash.go +++ /dev/null @@ -1,235 +0,0 @@ -// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described -// at http://cyan4973.github.io/xxHash/. -package xxhash - -import ( - "encoding/binary" - "errors" - "math/bits" -) - -const ( - prime1 uint64 = 11400714785074694791 - prime2 uint64 = 14029467366897019727 - prime3 uint64 = 1609587929392839161 - prime4 uint64 = 9650029242287828579 - prime5 uint64 = 2870177450012600261 -) - -// NOTE(caleb): I'm using both consts and vars of the primes. 
Using consts where -// possible in the Go code is worth a small (but measurable) performance boost -// by avoiding some MOVQs. Vars are needed for the asm and also are useful for -// convenience in the Go code in a few places where we need to intentionally -// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the -// result overflows a uint64). -var ( - prime1v = prime1 - prime2v = prime2 - prime3v = prime3 - prime4v = prime4 - prime5v = prime5 -) - -// Digest implements hash.Hash64. -type Digest struct { - v1 uint64 - v2 uint64 - v3 uint64 - v4 uint64 - total uint64 - mem [32]byte - n int // how much of mem is used -} - -// New creates a new Digest that computes the 64-bit xxHash algorithm. -func New() *Digest { - var d Digest - d.Reset() - return &d -} - -// Reset clears the Digest's state so that it can be reused. -func (d *Digest) Reset() { - d.v1 = prime1v + prime2 - d.v2 = prime2 - d.v3 = 0 - d.v4 = -prime1v - d.total = 0 - d.n = 0 -} - -// Size always returns 8 bytes. -func (d *Digest) Size() int { return 8 } - -// BlockSize always returns 32 bytes. -func (d *Digest) BlockSize() int { return 32 } - -// Write adds more data to d. It always returns len(b), nil. -func (d *Digest) Write(b []byte) (n int, err error) { - n = len(b) - d.total += uint64(n) - - if d.n+n < 32 { - // This new data doesn't even fill the current block. - copy(d.mem[d.n:], b) - d.n += n - return - } - - if d.n > 0 { - // Finish off the partial block. - copy(d.mem[d.n:], b) - d.v1 = round(d.v1, u64(d.mem[0:8])) - d.v2 = round(d.v2, u64(d.mem[8:16])) - d.v3 = round(d.v3, u64(d.mem[16:24])) - d.v4 = round(d.v4, u64(d.mem[24:32])) - b = b[32-d.n:] - d.n = 0 - } - - if len(b) >= 32 { - // One or more full blocks left. - nw := writeBlocks(d, b) - b = b[nw:] - } - - // Store any remaining partial block. - copy(d.mem[:], b) - d.n = len(b) - - return -} - -// Sum appends the current hash to b and returns the resulting slice. -func (d *Digest) Sum(b []byte) []byte { - s := d.Sum64() - return append( - b, - byte(s>>56), - byte(s>>48), - byte(s>>40), - byte(s>>32), - byte(s>>24), - byte(s>>16), - byte(s>>8), - byte(s), - ) -} - -// Sum64 returns the current hash. -func (d *Digest) Sum64() uint64 { - var h uint64 - - if d.total >= 32 { - v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 - h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) - h = mergeRound(h, v1) - h = mergeRound(h, v2) - h = mergeRound(h, v3) - h = mergeRound(h, v4) - } else { - h = d.v3 + prime5 - } - - h += d.total - - i, end := 0, d.n - for ; i+8 <= end; i += 8 { - k1 := round(0, u64(d.mem[i:i+8])) - h ^= k1 - h = rol27(h)*prime1 + prime4 - } - if i+4 <= end { - h ^= uint64(u32(d.mem[i:i+4])) * prime1 - h = rol23(h)*prime2 + prime3 - i += 4 - } - for i < end { - h ^= uint64(d.mem[i]) * prime5 - h = rol11(h) * prime1 - i++ - } - - h ^= h >> 33 - h *= prime2 - h ^= h >> 29 - h *= prime3 - h ^= h >> 32 - - return h -} - -const ( - magic = "xxh\x06" - marshaledSize = len(magic) + 8*5 + 32 -) - -// MarshalBinary implements the encoding.BinaryMarshaler interface. -func (d *Digest) MarshalBinary() ([]byte, error) { - b := make([]byte, 0, marshaledSize) - b = append(b, magic...) - b = appendUint64(b, d.v1) - b = appendUint64(b, d.v2) - b = appendUint64(b, d.v3) - b = appendUint64(b, d.v4) - b = appendUint64(b, d.total) - b = append(b, d.mem[:d.n]...) - b = b[:len(b)+len(d.mem)-d.n] - return b, nil -} - -// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. 
-func (d *Digest) UnmarshalBinary(b []byte) error { - if len(b) < len(magic) || string(b[:len(magic)]) != magic { - return errors.New("xxhash: invalid hash state identifier") - } - if len(b) != marshaledSize { - return errors.New("xxhash: invalid hash state size") - } - b = b[len(magic):] - b, d.v1 = consumeUint64(b) - b, d.v2 = consumeUint64(b) - b, d.v3 = consumeUint64(b) - b, d.v4 = consumeUint64(b) - b, d.total = consumeUint64(b) - copy(d.mem[:], b) - d.n = int(d.total % uint64(len(d.mem))) - return nil -} - -func appendUint64(b []byte, x uint64) []byte { - var a [8]byte - binary.LittleEndian.PutUint64(a[:], x) - return append(b, a[:]...) -} - -func consumeUint64(b []byte) ([]byte, uint64) { - x := u64(b) - return b[8:], x -} - -func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) } -func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) } - -func round(acc, input uint64) uint64 { - acc += input * prime2 - acc = rol31(acc) - acc *= prime1 - return acc -} - -func mergeRound(acc, val uint64) uint64 { - val = round(0, val) - acc ^= val - acc = acc*prime1 + prime4 - return acc -} - -func rol1(x uint64) uint64 { return bits.RotateLeft64(x, 1) } -func rol7(x uint64) uint64 { return bits.RotateLeft64(x, 7) } -func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) } -func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) } -func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) } -func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) } -func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) } -func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) } diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go deleted file mode 100644 index ad14b80..0000000 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build !appengine -// +build gc -// +build !purego - -package xxhash - -// Sum64 computes the 64-bit xxHash digest of b. -// -//go:noescape -func Sum64(b []byte) uint64 - -//go:noescape -func writeBlocks(d *Digest, b []byte) int diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s deleted file mode 100644 index be8db5b..0000000 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s +++ /dev/null @@ -1,215 +0,0 @@ -// +build !appengine -// +build gc -// +build !purego - -#include "textflag.h" - -// Register allocation: -// AX h -// SI pointer to advance through b -// DX n -// BX loop end -// R8 v1, k1 -// R9 v2 -// R10 v3 -// R11 v4 -// R12 tmp -// R13 prime1v -// R14 prime2v -// DI prime4v - -// round reads from and advances the buffer pointer in SI. -// It assumes that R13 has prime1v and R14 has prime2v. -#define round(r) \ - MOVQ (SI), R12 \ - ADDQ $8, SI \ - IMULQ R14, R12 \ - ADDQ R12, r \ - ROLQ $31, r \ - IMULQ R13, r - -// mergeRound applies a merge round on the two registers acc and val. -// It assumes that R13 has prime1v, R14 has prime2v, and DI has prime4v. -#define mergeRound(acc, val) \ - IMULQ R14, val \ - ROLQ $31, val \ - IMULQ R13, val \ - XORQ val, acc \ - IMULQ R13, acc \ - ADDQ DI, acc - -// func Sum64(b []byte) uint64 -TEXT ·Sum64(SB), NOSPLIT, $0-32 - // Load fixed primes. - MOVQ ·prime1v(SB), R13 - MOVQ ·prime2v(SB), R14 - MOVQ ·prime4v(SB), DI - - // Load slice. - MOVQ b_base+0(FP), SI - MOVQ b_len+8(FP), DX - LEAQ (SI)(DX*1), BX - - // The first loop limit will be len(b)-32. - SUBQ $32, BX - - // Check whether we have at least one block. 
- CMPQ DX, $32 - JLT noBlocks - - // Set up initial state (v1, v2, v3, v4). - MOVQ R13, R8 - ADDQ R14, R8 - MOVQ R14, R9 - XORQ R10, R10 - XORQ R11, R11 - SUBQ R13, R11 - - // Loop until SI > BX. -blockLoop: - round(R8) - round(R9) - round(R10) - round(R11) - - CMPQ SI, BX - JLE blockLoop - - MOVQ R8, AX - ROLQ $1, AX - MOVQ R9, R12 - ROLQ $7, R12 - ADDQ R12, AX - MOVQ R10, R12 - ROLQ $12, R12 - ADDQ R12, AX - MOVQ R11, R12 - ROLQ $18, R12 - ADDQ R12, AX - - mergeRound(AX, R8) - mergeRound(AX, R9) - mergeRound(AX, R10) - mergeRound(AX, R11) - - JMP afterBlocks - -noBlocks: - MOVQ ·prime5v(SB), AX - -afterBlocks: - ADDQ DX, AX - - // Right now BX has len(b)-32, and we want to loop until SI > len(b)-8. - ADDQ $24, BX - - CMPQ SI, BX - JG fourByte - -wordLoop: - // Calculate k1. - MOVQ (SI), R8 - ADDQ $8, SI - IMULQ R14, R8 - ROLQ $31, R8 - IMULQ R13, R8 - - XORQ R8, AX - ROLQ $27, AX - IMULQ R13, AX - ADDQ DI, AX - - CMPQ SI, BX - JLE wordLoop - -fourByte: - ADDQ $4, BX - CMPQ SI, BX - JG singles - - MOVL (SI), R8 - ADDQ $4, SI - IMULQ R13, R8 - XORQ R8, AX - - ROLQ $23, AX - IMULQ R14, AX - ADDQ ·prime3v(SB), AX - -singles: - ADDQ $4, BX - CMPQ SI, BX - JGE finalize - -singlesLoop: - MOVBQZX (SI), R12 - ADDQ $1, SI - IMULQ ·prime5v(SB), R12 - XORQ R12, AX - - ROLQ $11, AX - IMULQ R13, AX - - CMPQ SI, BX - JL singlesLoop - -finalize: - MOVQ AX, R12 - SHRQ $33, R12 - XORQ R12, AX - IMULQ R14, AX - MOVQ AX, R12 - SHRQ $29, R12 - XORQ R12, AX - IMULQ ·prime3v(SB), AX - MOVQ AX, R12 - SHRQ $32, R12 - XORQ R12, AX - - MOVQ AX, ret+24(FP) - RET - -// writeBlocks uses the same registers as above except that it uses AX to store -// the d pointer. - -// func writeBlocks(d *Digest, b []byte) int -TEXT ·writeBlocks(SB), NOSPLIT, $0-40 - // Load fixed primes needed for round. - MOVQ ·prime1v(SB), R13 - MOVQ ·prime2v(SB), R14 - - // Load slice. - MOVQ b_base+8(FP), SI - MOVQ b_len+16(FP), DX - LEAQ (SI)(DX*1), BX - SUBQ $32, BX - - // Load vN from d. - MOVQ d+0(FP), AX - MOVQ 0(AX), R8 // v1 - MOVQ 8(AX), R9 // v2 - MOVQ 16(AX), R10 // v3 - MOVQ 24(AX), R11 // v4 - - // We don't need to check the loop condition here; this function is - // always called with at least one block of data to process. -blockLoop: - round(R8) - round(R9) - round(R10) - round(R11) - - CMPQ SI, BX - JLE blockLoop - - // Copy vN back to d. - MOVQ R8, 0(AX) - MOVQ R9, 8(AX) - MOVQ R10, 16(AX) - MOVQ R11, 24(AX) - - // The number of bytes written is SI minus the old base pointer. - SUBQ b_base+8(FP), SI - MOVQ SI, ret+32(FP) - - RET diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go deleted file mode 100644 index 4a5a821..0000000 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go +++ /dev/null @@ -1,76 +0,0 @@ -// +build !amd64 appengine !gc purego - -package xxhash - -// Sum64 computes the 64-bit xxHash digest of b. -func Sum64(b []byte) uint64 { - // A simpler version would be - // d := New() - // d.Write(b) - // return d.Sum64() - // but this is faster, particularly for small inputs. 
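The comment above notes that the one-shot `Sum64` is just a faster path than allocating a `Digest` and streaming the input through it. A quick sketch of that equivalence, again assuming the public package:

```go
package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	b := []byte("some payload")

	// One-shot helper (the function defined in this file).
	h1 := xxhash.Sum64(b)

	// Equivalent, but slower for small inputs: the streaming Digest path.
	d := xxhash.New()
	d.Write(b)
	h2 := d.Sum64()

	fmt.Println(h1 == h2) // always true
}
```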
- - n := len(b) - var h uint64 - - if n >= 32 { - v1 := prime1v + prime2 - v2 := prime2 - v3 := uint64(0) - v4 := -prime1v - for len(b) >= 32 { - v1 = round(v1, u64(b[0:8:len(b)])) - v2 = round(v2, u64(b[8:16:len(b)])) - v3 = round(v3, u64(b[16:24:len(b)])) - v4 = round(v4, u64(b[24:32:len(b)])) - b = b[32:len(b):len(b)] - } - h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) - h = mergeRound(h, v1) - h = mergeRound(h, v2) - h = mergeRound(h, v3) - h = mergeRound(h, v4) - } else { - h = prime5 - } - - h += uint64(n) - - i, end := 0, len(b) - for ; i+8 <= end; i += 8 { - k1 := round(0, u64(b[i:i+8:len(b)])) - h ^= k1 - h = rol27(h)*prime1 + prime4 - } - if i+4 <= end { - h ^= uint64(u32(b[i:i+4:len(b)])) * prime1 - h = rol23(h)*prime2 + prime3 - i += 4 - } - for ; i < end; i++ { - h ^= uint64(b[i]) * prime5 - h = rol11(h) * prime1 - } - - h ^= h >> 33 - h *= prime2 - h ^= h >> 29 - h *= prime3 - h ^= h >> 32 - - return h -} - -func writeBlocks(d *Digest, b []byte) int { - v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 - n := len(b) - for len(b) >= 32 { - v1 = round(v1, u64(b[0:8:len(b)])) - v2 = round(v2, u64(b[8:16:len(b)])) - v3 = round(v3, u64(b[16:24:len(b)])) - v4 = round(v4, u64(b[24:32:len(b)])) - b = b[32:len(b):len(b)] - } - d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4 - return n - len(b) -} diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go deleted file mode 100644 index fc9bea7..0000000 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build appengine - -// This file contains the safe implementations of otherwise unsafe-using code. - -package xxhash - -// Sum64String computes the 64-bit xxHash digest of s. -func Sum64String(s string) uint64 { - return Sum64([]byte(s)) -} - -// WriteString adds more data to d. It always returns len(s), nil. -func (d *Digest) WriteString(s string) (n int, err error) { - return d.Write([]byte(s)) -} diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go deleted file mode 100644 index 376e0ca..0000000 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go +++ /dev/null @@ -1,57 +0,0 @@ -// +build !appengine - -// This file encapsulates usage of unsafe. -// xxhash_safe.go contains the safe implementations. - -package xxhash - -import ( - "unsafe" -) - -// In the future it's possible that compiler optimizations will make these -// XxxString functions unnecessary by realizing that calls such as -// Sum64([]byte(s)) don't need to copy s. See https://golang.org/issue/2205. -// If that happens, even if we keep these functions they can be replaced with -// the trivial safe code. - -// NOTE: The usual way of doing an unsafe string-to-[]byte conversion is: -// -// var b []byte -// bh := (*reflect.SliceHeader)(unsafe.Pointer(&b)) -// bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data -// bh.Len = len(s) -// bh.Cap = len(s) -// -// Unfortunately, as of Go 1.15.3 the inliner's cost model assigns a high enough -// weight to this sequence of expressions that any function that uses it will -// not be inlined. Instead, the functions below use a different unsafe -// conversion designed to minimize the inliner weight and allow both to be -// inlined. There is also a test (TestInlining) which verifies that these are -// inlined. -// -// See https://github.com/golang/go/issues/42739 for discussion. - -// Sum64String computes the 64-bit xxHash digest of s. 
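The unsafe build above adds string variants that skip the `[]byte(s)` copy on platforms where `unsafe` is available. A small usage sketch, assuming the public package exposes `Sum64String` and `WriteString` as shown:

```go
package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	s := "session:abc123"

	// Avoids allocating a copy of s on builds where the unsafe path is enabled.
	h := xxhash.Sum64String(s)

	// Same shortcut for the streaming API.
	d := xxhash.New()
	d.WriteString(s)
	fmt.Println(h == d.Sum64()) // true
}
```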
-// It may be faster than Sum64([]byte(s)) by avoiding a copy. -func Sum64String(s string) uint64 { - b := *(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)})) - return Sum64(b) -} - -// WriteString adds more data to d. It always returns len(s), nil. -// It may be faster than Write([]byte(s)) by avoiding a copy. -func (d *Digest) WriteString(s string) (n int, err error) { - d.Write(*(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)}))) - // d.Write always returns len(s), nil. - // Ignoring the return output and returning these fixed values buys a - // savings of 6 in the inliner's cost model. - return len(s), nil -} - -// sliceHeader is similar to reflect.SliceHeader, but it assumes that the layout -// of the first two words is the same as the layout of a string. -type sliceHeader struct { - s string - cap int -} diff --git a/vendor/github.com/coocood/freecache/.travis.yml b/vendor/github.com/coocood/freecache/.travis.yml deleted file mode 100644 index 50d617a..0000000 --- a/vendor/github.com/coocood/freecache/.travis.yml +++ /dev/null @@ -1,9 +0,0 @@ -language: go - -go: - - "1.10" - - "1.11" - - "1.12" - - "1.13" - -script: go get github.com/coocood/freecache && go test -race diff --git a/vendor/github.com/coocood/freecache/LICENSE b/vendor/github.com/coocood/freecache/LICENSE deleted file mode 100644 index fb5a06b..0000000 --- a/vendor/github.com/coocood/freecache/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License - -Copyright (c) 2015 Ewan Chou. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/coocood/freecache/README.md b/vendor/github.com/coocood/freecache/README.md deleted file mode 100644 index e68b2c3..0000000 --- a/vendor/github.com/coocood/freecache/README.md +++ /dev/null @@ -1,76 +0,0 @@ -# FreeCache - A cache library for Go with zero GC overhead and high concurrent performance. - -Long lived objects in memory introduce expensive GC overhead, With FreeCache, you can cache unlimited number of objects in memory -without increased latency and degraded throughput. 
- -[![Build Status](https://travis-ci.org/coocood/freecache.png?branch=master)](https://travis-ci.org/coocood/freecache) -[![GoCover](http://gocover.io/_badge/github.com/coocood/freecache)](http://gocover.io/github.com/coocood/freecache) -[![GoDoc](https://godoc.org/github.com/coocood/freecache?status.svg)](https://godoc.org/github.com/coocood/freecache) - -## Features - -* Store hundreds of millions of entries -* Zero GC overhead -* High concurrent thread-safe access -* Pure Go implementation -* Expiration support -* Nearly LRU algorithm -* Strictly limited memory usage -* Come with a toy server that supports a few basic Redis commands with pipeline -* Iterator support - -## Performance - -Here is the benchmark result compares to built-in map, `Set` performance is about 2x faster than built-in map, `Get` performance is about 1/2x slower than built-in map. Since it is single threaded benchmark, in multi-threaded environment, -FreeCache should be many times faster than single lock protected built-in map. - - BenchmarkCacheSet 3000000 446 ns/op - BenchmarkMapSet 2000000 861 ns/op - BenchmarkCacheGet 3000000 517 ns/op - BenchmarkMapGet 10000000 212 ns/op - -## Example Usage - -```go -// In bytes, where 1024 * 1024 represents a single Megabyte, and 100 * 1024*1024 represents 100 Megabytes. -cacheSize := 100 * 1024 * 1024 -cache := freecache.NewCache(cacheSize) -debug.SetGCPercent(20) -key := []byte("abc") -val := []byte("def") -expire := 60 // expire in 60 seconds -cache.Set(key, val, expire) -got, err := cache.Get(key) -if err != nil { - fmt.Println(err) -} else { - fmt.Printf("%s\n", got) -} -affected := cache.Del(key) -fmt.Println("deleted key ", affected) -fmt.Println("entry count ", cache.EntryCount()) -``` - -## Notice - -* Memory is preallocated. -* If you allocate large amount of memory, you may need to set `debug.SetGCPercent()` -to a much lower percentage to get a normal GC frequency. - -## How it is done - -FreeCache avoids GC overhead by reducing the number of pointers. -No matter how many entries stored in it, there are only 512 pointers. -The data set is sharded into 256 segments by the hash value of the key. -Each segment has only two pointers, one is the ring buffer that stores keys and values, -the other one is the index slice which used to lookup for an entry. -Each segment has its own lock, so it supports high concurrent access. - -## TODO - -* Support dump to file and load from file. -* Support resize cache size at runtime. - -## License - -The MIT License diff --git a/vendor/github.com/coocood/freecache/cache.go b/vendor/github.com/coocood/freecache/cache.go deleted file mode 100644 index 2499e01..0000000 --- a/vendor/github.com/coocood/freecache/cache.go +++ /dev/null @@ -1,368 +0,0 @@ -package freecache - -import ( - "encoding/binary" - "sync" - "sync/atomic" - - "github.com/cespare/xxhash/v2" -) - -const ( - // segmentCount represents the number of segments within a freecache instance. - segmentCount = 256 - // segmentAndOpVal is bitwise AND applied to the hashVal to find the segment id. - segmentAndOpVal = 255 - minBufSize = 512 * 1024 -) - -// Cache is a freecache instance. -type Cache struct { - locks [segmentCount]sync.Mutex - segments [segmentCount]segment -} - -type Updater func(value []byte, found bool) (newValue []byte, replace bool, expireSeconds int) - -func hashFunc(data []byte) uint64 { - return xxhash.Sum64(data) -} - -// NewCache returns a newly initialize cache by size. -// The cache size will be set to 512KB at minimum. 
-// If the size is set relatively large, you should call -// `debug.SetGCPercent()`, set it to a much smaller value -// to limit the memory consumption and GC pause time. -func NewCache(size int) (cache *Cache) { - return NewCacheCustomTimer(size, defaultTimer{}) -} - -// NewCacheCustomTimer returns new cache with custom timer. -func NewCacheCustomTimer(size int, timer Timer) (cache *Cache) { - if size < minBufSize { - size = minBufSize - } - if timer == nil { - timer = defaultTimer{} - } - cache = new(Cache) - for i := 0; i < segmentCount; i++ { - cache.segments[i] = newSegment(size/segmentCount, i, timer) - } - return -} - -// Set sets a key, value and expiration for a cache entry and stores it in the cache. -// If the key is larger than 65535 or value is larger than 1/1024 of the cache size, -// the entry will not be written to the cache. expireSeconds <= 0 means no expire, -// but it can be evicted when cache is full. -func (cache *Cache) Set(key, value []byte, expireSeconds int) (err error) { - hashVal := hashFunc(key) - segID := hashVal & segmentAndOpVal - cache.locks[segID].Lock() - err = cache.segments[segID].set(key, value, hashVal, expireSeconds) - cache.locks[segID].Unlock() - return -} - -// Touch updates the expiration time of an existing key. expireSeconds <= 0 means no expire, -// but it can be evicted when cache is full. -func (cache *Cache) Touch(key []byte, expireSeconds int) (err error) { - hashVal := hashFunc(key) - segID := hashVal & segmentAndOpVal - cache.locks[segID].Lock() - err = cache.segments[segID].touch(key, hashVal, expireSeconds) - cache.locks[segID].Unlock() - return -} - -// Get returns the value or not found error. -func (cache *Cache) Get(key []byte) (value []byte, err error) { - hashVal := hashFunc(key) - segID := hashVal & segmentAndOpVal - cache.locks[segID].Lock() - value, _, err = cache.segments[segID].get(key, nil, hashVal, false) - cache.locks[segID].Unlock() - return -} - -// GetFn is equivalent to Get or GetWithBuf, but it attempts to be zero-copy, -// calling the provided function with slice view over the current underlying -// value of the key in memory. The slice is constrained in length and capacity. -// -// In moth cases, this method will not alloc a byte buffer. The only exception -// is when the value wraps around the underlying segment ring buffer. -// -// The method will return ErrNotFound is there's a miss, and the function will -// not be called. Errors returned by the function will be propagated. -func (cache *Cache) GetFn(key []byte, fn func([]byte) error) (err error) { - hashVal := hashFunc(key) - segID := hashVal & segmentAndOpVal - cache.locks[segID].Lock() - err = cache.segments[segID].view(key, fn, hashVal, false) - cache.locks[segID].Unlock() - return -} - -// GetOrSet returns existing value or if record doesn't exist -// it sets a new key, value and expiration for a cache entry and stores it in the cache, returns nil in that case -func (cache *Cache) GetOrSet(key, value []byte, expireSeconds int) (retValue []byte, err error) { - hashVal := hashFunc(key) - segID := hashVal & segmentAndOpVal - cache.locks[segID].Lock() - defer cache.locks[segID].Unlock() - - retValue, _, err = cache.segments[segID].get(key, nil, hashVal, false) - if err != nil { - err = cache.segments[segID].set(key, value, hashVal, expireSeconds) - } - return -} - -// SetAndGet sets a key, value and expiration for a cache entry and stores it in the cache. 
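Beyond plain `Set`/`Get`, the methods above include a read-through helper (`GetOrSet`) and a zero-copy accessor (`GetFn`). A short usage sketch, hedged to the signatures shown in this file:

```go
package main

import (
	"fmt"

	"github.com/coocood/freecache"
)

func main() {
	cache := freecache.NewCache(32 * 1024 * 1024) // 32 MB

	// GetOrSet returns the existing value, or stores the supplied one and
	// returns nil when the key was absent.
	if existing, err := cache.GetOrSet([]byte("cfg"), []byte(`{"a":1}`), 300); err == nil && existing != nil {
		fmt.Printf("already cached: %s\n", existing)
	}

	// GetFn hands the callback a slice view over the stored bytes while the
	// segment lock is held, so the value must not be retained after returning.
	_ = cache.GetFn([]byte("cfg"), func(val []byte) error {
		fmt.Printf("cached %d bytes\n", len(val))
		return nil
	})
}
```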
-// If the key is larger than 65535 or value is larger than 1/1024 of the cache size, -// the entry will not be written to the cache. expireSeconds <= 0 means no expire, -// but it can be evicted when cache is full. Returns existing value if record exists -// with a bool value to indicate whether an existing record was found -func (cache *Cache) SetAndGet(key, value []byte, expireSeconds int) (retValue []byte, found bool, err error) { - hashVal := hashFunc(key) - segID := hashVal & segmentAndOpVal - cache.locks[segID].Lock() - defer cache.locks[segID].Unlock() - - retValue, _, err = cache.segments[segID].get(key, nil, hashVal, false) - if err == nil { - found = true - } - err = cache.segments[segID].set(key, value, hashVal, expireSeconds) - return -} - -// Update gets value for a key, passes it to updater function that decides if set should be called as well -// This allows for an atomic Get plus Set call using the existing value to decide on whether to call Set. -// If the key is larger than 65535 or value is larger than 1/1024 of the cache size, -// the entry will not be written to the cache. expireSeconds <= 0 means no expire, -// but it can be evicted when cache is full. Returns bool value to indicate if existing record was found along with bool -// value indicating the value was replaced and error if any -func (cache *Cache) Update(key []byte, updater Updater) (found bool, replaced bool, err error) { - hashVal := hashFunc(key) - segID := hashVal & segmentAndOpVal - cache.locks[segID].Lock() - defer cache.locks[segID].Unlock() - - retValue, _, err := cache.segments[segID].get(key, nil, hashVal, false) - if err == nil { - found = true - } else { - err = nil // Clear ErrNotFound error since we're returning found flag - } - value, replaced, expireSeconds := updater(retValue, found) - if !replaced { - return - } - err = cache.segments[segID].set(key, value, hashVal, expireSeconds) - return -} - -// Peek returns the value or not found error, without updating access time or counters. -func (cache *Cache) Peek(key []byte) (value []byte, err error) { - hashVal := hashFunc(key) - segID := hashVal & segmentAndOpVal - cache.locks[segID].Lock() - value, _, err = cache.segments[segID].get(key, nil, hashVal, true) - cache.locks[segID].Unlock() - return -} - -// PeekFn is equivalent to Peek, but it attempts to be zero-copy, calling the -// provided function with slice view over the current underlying value of the -// key in memory. The slice is constrained in length and capacity. -// -// In moth cases, this method will not alloc a byte buffer. The only exception -// is when the value wraps around the underlying segment ring buffer. -// -// The method will return ErrNotFound is there's a miss, and the function will -// not be called. Errors returned by the function will be propagated. -func (cache *Cache) PeekFn(key []byte, fn func([]byte) error) (err error) { - hashVal := hashFunc(key) - segID := hashVal & segmentAndOpVal - cache.locks[segID].Lock() - err = cache.segments[segID].view(key, fn, hashVal, true) - cache.locks[segID].Unlock() - return -} - -// GetWithBuf copies the value to the buf or returns not found error. -// This method doesn't allocate memory when the capacity of buf is greater or equal to value. 
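`Update` above performs an atomic get-then-maybe-set under the segment lock, driven by the `Updater` callback type defined earlier in this file. A sketch of using it for a small counter (the decimal-string encoding is purely illustrative):

```go
package main

import (
	"fmt"
	"strconv"

	"github.com/coocood/freecache"
)

func main() {
	cache := freecache.NewCache(32 * 1024 * 1024)

	// Atomically increment a counter stored as a decimal string.
	found, replaced, err := cache.Update([]byte("hits"), func(value []byte, found bool) ([]byte, bool, int) {
		n := 0
		if found {
			n, _ = strconv.Atoi(string(value))
		}
		return []byte(strconv.Itoa(n + 1)), true, 60 // replace, expire in 60s
	})
	fmt.Println(found, replaced, err) // false true <nil> on the first call
}
```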
-func (cache *Cache) GetWithBuf(key, buf []byte) (value []byte, err error) { - hashVal := hashFunc(key) - segID := hashVal & segmentAndOpVal - cache.locks[segID].Lock() - value, _, err = cache.segments[segID].get(key, buf, hashVal, false) - cache.locks[segID].Unlock() - return -} - -// GetWithExpiration returns the value with expiration or not found error. -func (cache *Cache) GetWithExpiration(key []byte) (value []byte, expireAt uint32, err error) { - hashVal := hashFunc(key) - segID := hashVal & segmentAndOpVal - cache.locks[segID].Lock() - value, expireAt, err = cache.segments[segID].get(key, nil, hashVal, false) - cache.locks[segID].Unlock() - return -} - -// TTL returns the TTL time left for a given key or a not found error. -func (cache *Cache) TTL(key []byte) (timeLeft uint32, err error) { - hashVal := hashFunc(key) - segID := hashVal & segmentAndOpVal - cache.locks[segID].Lock() - timeLeft, err = cache.segments[segID].ttl(key, hashVal) - cache.locks[segID].Unlock() - return -} - -// Del deletes an item in the cache by key and returns true or false if a delete occurred. -func (cache *Cache) Del(key []byte) (affected bool) { - hashVal := hashFunc(key) - segID := hashVal & segmentAndOpVal - cache.locks[segID].Lock() - affected = cache.segments[segID].del(key, hashVal) - cache.locks[segID].Unlock() - return -} - -// SetInt stores in integer value in the cache. -func (cache *Cache) SetInt(key int64, value []byte, expireSeconds int) (err error) { - var bKey [8]byte - binary.LittleEndian.PutUint64(bKey[:], uint64(key)) - return cache.Set(bKey[:], value, expireSeconds) -} - -// GetInt returns the value for an integer within the cache or a not found error. -func (cache *Cache) GetInt(key int64) (value []byte, err error) { - var bKey [8]byte - binary.LittleEndian.PutUint64(bKey[:], uint64(key)) - return cache.Get(bKey[:]) -} - -// GetIntWithExpiration returns the value and expiration or a not found error. -func (cache *Cache) GetIntWithExpiration(key int64) (value []byte, expireAt uint32, err error) { - var bKey [8]byte - binary.LittleEndian.PutUint64(bKey[:], uint64(key)) - return cache.GetWithExpiration(bKey[:]) -} - -// DelInt deletes an item in the cache by int key and returns true or false if a delete occurred. -func (cache *Cache) DelInt(key int64) (affected bool) { - var bKey [8]byte - binary.LittleEndian.PutUint64(bKey[:], uint64(key)) - return cache.Del(bKey[:]) -} - -// EvacuateCount is a metric indicating the number of times an eviction occurred. -func (cache *Cache) EvacuateCount() (count int64) { - for i := range cache.segments { - count += atomic.LoadInt64(&cache.segments[i].totalEvacuate) - } - return -} - -// ExpiredCount is a metric indicating the number of times an expire occurred. -func (cache *Cache) ExpiredCount() (count int64) { - for i := range cache.segments { - count += atomic.LoadInt64(&cache.segments[i].totalExpired) - } - return -} - -// EntryCount returns the number of items currently in the cache. -func (cache *Cache) EntryCount() (entryCount int64) { - for i := range cache.segments { - entryCount += atomic.LoadInt64(&cache.segments[i].entryCount) - } - return -} - -// AverageAccessTime returns the average unix timestamp when a entry being accessed. -// Entries have greater access time will be evacuated when it -// is about to be overwritten by new value. 
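The buffer-reusing read path and the TTL/statistics accessors above are the main tools for keeping allocations and visibility in check. A brief sketch against the signatures shown in this file:

```go
package main

import (
	"fmt"

	"github.com/coocood/freecache"
)

func main() {
	cache := freecache.NewCache(32 * 1024 * 1024)
	_ = cache.Set([]byte("cfg"), []byte(`{"a":1}`), 120)

	// Reuse a caller-owned buffer to avoid an allocation on the hot read path.
	buf := make([]byte, 256)
	if val, err := cache.GetWithBuf([]byte("cfg"), buf); err == nil {
		fmt.Printf("value: %s\n", val) // val aliases buf when buf is large enough
	}

	if left, err := cache.TTL([]byte("cfg")); err == nil {
		fmt.Println("seconds left:", left)
	}

	fmt.Println("entries:", cache.EntryCount(), "evictions:", cache.EvacuateCount())
}
```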
-func (cache *Cache) AverageAccessTime() int64 { - var entryCount, totalTime int64 - for i := range cache.segments { - totalTime += atomic.LoadInt64(&cache.segments[i].totalTime) - entryCount += atomic.LoadInt64(&cache.segments[i].totalCount) - } - if entryCount == 0 { - return 0 - } else { - return totalTime / entryCount - } -} - -// HitCount is a metric that returns number of times a key was found in the cache. -func (cache *Cache) HitCount() (count int64) { - for i := range cache.segments { - count += atomic.LoadInt64(&cache.segments[i].hitCount) - } - return -} - -// MissCount is a metric that returns the number of times a miss occurred in the cache. -func (cache *Cache) MissCount() (count int64) { - for i := range cache.segments { - count += atomic.LoadInt64(&cache.segments[i].missCount) - } - return -} - -// LookupCount is a metric that returns the number of times a lookup for a given key occurred. -func (cache *Cache) LookupCount() int64 { - return cache.HitCount() + cache.MissCount() -} - -// HitRate is the ratio of hits over lookups. -func (cache *Cache) HitRate() float64 { - hitCount, missCount := cache.HitCount(), cache.MissCount() - lookupCount := hitCount + missCount - if lookupCount == 0 { - return 0 - } else { - return float64(hitCount) / float64(lookupCount) - } -} - -// OverwriteCount indicates the number of times entries have been overriden. -func (cache *Cache) OverwriteCount() (overwriteCount int64) { - for i := range cache.segments { - overwriteCount += atomic.LoadInt64(&cache.segments[i].overwrites) - } - return -} - -// TouchedCount indicates the number of times entries have had their expiration time extended. -func (cache *Cache) TouchedCount() (touchedCount int64) { - for i := range cache.segments { - touchedCount += atomic.LoadInt64(&cache.segments[i].touched) - } - return -} - -// Clear clears the cache. -func (cache *Cache) Clear() { - for i := range cache.segments { - cache.locks[i].Lock() - cache.segments[i].clear() - cache.locks[i].Unlock() - } -} - -// ResetStatistics refreshes the current state of the statistics. -func (cache *Cache) ResetStatistics() { - for i := range cache.segments { - cache.locks[i].Lock() - cache.segments[i].resetStatistics() - cache.locks[i].Unlock() - } -} diff --git a/vendor/github.com/coocood/freecache/iterator.go b/vendor/github.com/coocood/freecache/iterator.go deleted file mode 100644 index 6f83d99..0000000 --- a/vendor/github.com/coocood/freecache/iterator.go +++ /dev/null @@ -1,79 +0,0 @@ -package freecache - -import ( - "unsafe" -) - -// Iterator iterates the entries for the cache. -type Iterator struct { - cache *Cache - segmentIdx int - slotIdx int - entryIdx int -} - -// Entry represents a key/value pair. -type Entry struct { - Key []byte - Value []byte -} - -// Next returns the next entry for the iterator. -// The order of the entries is not guaranteed. -// If there is no more entries to return, nil will be returned. 
-func (it *Iterator) Next() *Entry { - for it.segmentIdx < 256 { - entry := it.nextForSegment(it.segmentIdx) - if entry != nil { - return entry - } - it.segmentIdx++ - it.slotIdx = 0 - it.entryIdx = 0 - } - return nil -} - -func (it *Iterator) nextForSegment(segIdx int) *Entry { - it.cache.locks[segIdx].Lock() - defer it.cache.locks[segIdx].Unlock() - seg := &it.cache.segments[segIdx] - for it.slotIdx < 256 { - entry := it.nextForSlot(seg, it.slotIdx) - if entry != nil { - return entry - } - it.slotIdx++ - it.entryIdx = 0 - } - return nil -} - -func (it *Iterator) nextForSlot(seg *segment, slotId int) *Entry { - slotOff := int32(it.slotIdx) * seg.slotCap - slot := seg.slotsData[slotOff : slotOff+seg.slotLens[it.slotIdx] : slotOff+seg.slotCap] - for it.entryIdx < len(slot) { - ptr := slot[it.entryIdx] - it.entryIdx++ - now := seg.timer.Now() - var hdrBuf [ENTRY_HDR_SIZE]byte - seg.rb.ReadAt(hdrBuf[:], ptr.offset) - hdr := (*entryHdr)(unsafe.Pointer(&hdrBuf[0])) - if hdr.expireAt == 0 || hdr.expireAt > now { - entry := new(Entry) - entry.Key = make([]byte, hdr.keyLen) - entry.Value = make([]byte, hdr.valLen) - seg.rb.ReadAt(entry.Key, ptr.offset+ENTRY_HDR_SIZE) - seg.rb.ReadAt(entry.Value, ptr.offset+ENTRY_HDR_SIZE+int64(hdr.keyLen)) - return entry - } - } - return nil -} - -// NewIterator creates a new iterator for the cache. -func (cache *Cache) NewIterator() *Iterator { - return &Iterator{ - cache: cache, - } -} diff --git a/vendor/github.com/coocood/freecache/ringbuf.go b/vendor/github.com/coocood/freecache/ringbuf.go deleted file mode 100644 index 71e3586..0000000 --- a/vendor/github.com/coocood/freecache/ringbuf.go +++ /dev/null @@ -1,250 +0,0 @@ -package freecache - -import ( - "bytes" - "errors" - "fmt" - "io" -) - -var ErrOutOfRange = errors.New("out of range") - -// Ring buffer has a fixed size, when data exceeds the -// size, old data will be overwritten by new data. -// It only contains the data in the stream from begin to end -type RingBuf struct { - begin int64 // beginning offset of the data stream. - end int64 // ending offset of the data stream. - data []byte - index int //range from '0' to 'len(rb.data)-1' -} - -func NewRingBuf(size int, begin int64) (rb RingBuf) { - rb.data = make([]byte, size) - rb.Reset(begin) - return -} - -// Reset the ring buffer -// -// Parameters: -// begin: beginning offset of the data stream -func (rb *RingBuf) Reset(begin int64) { - rb.begin = begin - rb.end = begin - rb.index = 0 -} - -// Create a copy of the buffer. -func (rb *RingBuf) Dump() []byte { - dump := make([]byte, len(rb.data)) - copy(dump, rb.data) - return dump -} - -func (rb *RingBuf) String() string { - return fmt.Sprintf("[size:%v, start:%v, end:%v, index:%v]", len(rb.data), rb.begin, rb.end, rb.index) -} - -func (rb *RingBuf) Size() int64 { - return int64(len(rb.data)) -} - -func (rb *RingBuf) Begin() int64 { - return rb.begin -} - -func (rb *RingBuf) End() int64 { - return rb.end -} - -// read up to len(p), at off of the data stream. 
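The iterator above walks every segment and slot under the corresponding segment lock, skipping expired entries; entry order is unspecified. A usage sketch combining it with the hit-rate metric defined alongside:

```go
package main

import (
	"fmt"

	"github.com/coocood/freecache"
)

func main() {
	cache := freecache.NewCache(32 * 1024 * 1024)
	_ = cache.Set([]byte("a"), []byte("1"), 0) // expireSeconds <= 0 means no expiry
	_ = cache.Set([]byte("b"), []byte("2"), 0)

	// Walk every live entry; the order is not guaranteed, as documented above.
	it := cache.NewIterator()
	for entry := it.Next(); entry != nil; entry = it.Next() {
		fmt.Printf("%s => %s\n", entry.Key, entry.Value)
	}

	_, _ = cache.Get([]byte("a"))
	fmt.Printf("hit rate: %.2f\n", cache.HitRate())
}
```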
-func (rb *RingBuf) ReadAt(p []byte, off int64) (n int, err error) { - if off > rb.end || off < rb.begin { - err = ErrOutOfRange - return - } - readOff := rb.getDataOff(off) - readEnd := readOff + int(rb.end-off) - if readEnd <= len(rb.data) { - n = copy(p, rb.data[readOff:readEnd]) - } else { - n = copy(p, rb.data[readOff:]) - if n < len(p) { - n += copy(p[n:], rb.data[:readEnd-len(rb.data)]) - } - } - if n < len(p) { - err = io.EOF - } - return -} - -func (rb *RingBuf) getDataOff(off int64) int { - var dataOff int - if rb.end-rb.begin < int64(len(rb.data)) { - dataOff = int(off - rb.begin) - } else { - dataOff = rb.index + int(off-rb.begin) - } - if dataOff >= len(rb.data) { - dataOff -= len(rb.data) - } - return dataOff -} - -// Slice returns a slice of the supplied range of the ring buffer. It will -// not alloc unless the requested range wraps the ring buffer. -func (rb *RingBuf) Slice(off, length int64) ([]byte, error) { - if off > rb.end || off < rb.begin { - return nil, ErrOutOfRange - } - readOff := rb.getDataOff(off) - readEnd := readOff + int(length) - if readEnd <= len(rb.data) { - return rb.data[readOff:readEnd:readEnd], nil - } - buf := make([]byte, length) - n := copy(buf, rb.data[readOff:]) - if n < int(length) { - n += copy(buf[n:], rb.data[:readEnd-len(rb.data)]) - } - if n < int(length) { - return nil, io.EOF - } - return buf, nil -} - -func (rb *RingBuf) Write(p []byte) (n int, err error) { - if len(p) > len(rb.data) { - err = ErrOutOfRange - return - } - for n < len(p) { - written := copy(rb.data[rb.index:], p[n:]) - rb.end += int64(written) - n += written - rb.index += written - if rb.index >= len(rb.data) { - rb.index -= len(rb.data) - } - } - if int(rb.end-rb.begin) > len(rb.data) { - rb.begin = rb.end - int64(len(rb.data)) - } - return -} - -func (rb *RingBuf) WriteAt(p []byte, off int64) (n int, err error) { - if off+int64(len(p)) > rb.end || off < rb.begin { - err = ErrOutOfRange - return - } - writeOff := rb.getDataOff(off) - writeEnd := writeOff + int(rb.end-off) - if writeEnd <= len(rb.data) { - n = copy(rb.data[writeOff:writeEnd], p) - } else { - n = copy(rb.data[writeOff:], p) - if n < len(p) { - n += copy(rb.data[:writeEnd-len(rb.data)], p[n:]) - } - } - return -} - -func (rb *RingBuf) EqualAt(p []byte, off int64) bool { - if off+int64(len(p)) > rb.end || off < rb.begin { - return false - } - readOff := rb.getDataOff(off) - readEnd := readOff + len(p) - if readEnd <= len(rb.data) { - return bytes.Equal(p, rb.data[readOff:readEnd]) - } else { - firstLen := len(rb.data) - readOff - equal := bytes.Equal(p[:firstLen], rb.data[readOff:]) - if equal { - secondLen := len(p) - firstLen - equal = bytes.Equal(p[firstLen:], rb.data[:secondLen]) - } - return equal - } -} - -// Evacuate read the data at off, then write it to the the data stream, -// Keep it from being overwritten by new data. 
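`RingBuf` above is the per-segment storage: a fixed byte array addressed by monotonically increasing stream offsets, where `Begin()` advances as old data is overwritten by new writes. Since the type and its constructor are exported, here is a small illustrative sketch; the printed values follow from the `Write`/`ReadAt` logic shown above:

```go
package main

import (
	"fmt"

	"github.com/coocood/freecache"
)

func main() {
	// A 16-byte ring over a logical byte stream starting at offset 0.
	rb := freecache.NewRingBuf(16, 0)
	rb.Write([]byte("abcdefgh"))
	rb.Write([]byte("ijklmnopqrs")) // 19 bytes total: the oldest 3 are overwritten

	// Only offsets in [Begin(), End()) are still addressable.
	p := make([]byte, 4)
	n, err := rb.ReadAt(p, rb.Begin())
	fmt.Println(rb.Begin(), rb.End(), n, err, string(p[:n])) // 3 19 4 <nil> defg
}
```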
-func (rb *RingBuf) Evacuate(off int64, length int) (newOff int64) { - if off+int64(length) > rb.end || off < rb.begin { - return -1 - } - readOff := rb.getDataOff(off) - if readOff == rb.index { - // no copy evacuate - rb.index += length - if rb.index >= len(rb.data) { - rb.index -= len(rb.data) - } - } else if readOff < rb.index { - var n = copy(rb.data[rb.index:], rb.data[readOff:readOff+length]) - rb.index += n - if rb.index == len(rb.data) { - rb.index = copy(rb.data, rb.data[readOff+n:readOff+length]) - } - } else { - var readEnd = readOff + length - var n int - if readEnd <= len(rb.data) { - n = copy(rb.data[rb.index:], rb.data[readOff:readEnd]) - rb.index += n - } else { - n = copy(rb.data[rb.index:], rb.data[readOff:]) - rb.index += n - var tail = length - n - n = copy(rb.data[rb.index:], rb.data[:tail]) - rb.index += n - if rb.index == len(rb.data) { - rb.index = copy(rb.data, rb.data[n:tail]) - } - } - } - newOff = rb.end - rb.end += int64(length) - if rb.begin < rb.end-int64(len(rb.data)) { - rb.begin = rb.end - int64(len(rb.data)) - } - return -} - -func (rb *RingBuf) Resize(newSize int) { - if len(rb.data) == newSize { - return - } - newData := make([]byte, newSize) - var offset int - if rb.end-rb.begin == int64(len(rb.data)) { - offset = rb.index - } - if int(rb.end-rb.begin) > newSize { - discard := int(rb.end-rb.begin) - newSize - offset = (offset + discard) % len(rb.data) - rb.begin = rb.end - int64(newSize) - } - n := copy(newData, rb.data[offset:]) - if n < newSize { - copy(newData[n:], rb.data[:offset]) - } - rb.data = newData - rb.index = 0 -} - -func (rb *RingBuf) Skip(length int64) { - rb.end += length - rb.index += int(length) - for rb.index >= len(rb.data) { - rb.index -= len(rb.data) - } - if int(rb.end-rb.begin) > len(rb.data) { - rb.begin = rb.end - int64(len(rb.data)) - } -} diff --git a/vendor/github.com/coocood/freecache/segment.go b/vendor/github.com/coocood/freecache/segment.go deleted file mode 100644 index bcc90a9..0000000 --- a/vendor/github.com/coocood/freecache/segment.go +++ /dev/null @@ -1,485 +0,0 @@ -package freecache - -import ( - "errors" - "sync/atomic" - "unsafe" -) - -const HASH_ENTRY_SIZE = 16 -const ENTRY_HDR_SIZE = 24 - -var ErrLargeKey = errors.New("The key is larger than 65535") -var ErrLargeEntry = errors.New("The entry size is larger than 1/1024 of cache size") -var ErrNotFound = errors.New("Entry not found") - -// entry pointer struct points to an entry in ring buffer -type entryPtr struct { - offset int64 // entry offset in ring buffer - hash16 uint16 // entries are ordered by hash16 in a slot. - keyLen uint16 // used to compare a key - reserved uint32 -} - -// entry header struct in ring buffer, followed by key and value. -type entryHdr struct { - accessTime uint32 - expireAt uint32 - keyLen uint16 - hash16 uint16 - valLen uint32 - valCap uint32 - deleted bool - slotId uint8 - reserved uint16 -} - -// a segment contains 256 slots, a slot is an array of entry pointers ordered by hash16 value -// the entry can be looked up by hash value of the key. -type segment struct { - rb RingBuf // ring buffer that stores data - segId int - _ uint32 - missCount int64 - hitCount int64 - entryCount int64 - totalCount int64 // number of entries in ring buffer, including deleted entries. - totalTime int64 // used to calculate least recent used entry. 
- timer Timer // Timer giving current time - totalEvacuate int64 // used for debug - totalExpired int64 // used for debug - overwrites int64 // used for debug - touched int64 // used for debug - vacuumLen int64 // up to vacuumLen, new data can be written without overwriting old data. - slotLens [256]int32 // The actual length for every slot. - slotCap int32 // max number of entry pointers a slot can hold. - slotsData []entryPtr // shared by all 256 slots -} - -func newSegment(bufSize int, segId int, timer Timer) (seg segment) { - seg.rb = NewRingBuf(bufSize, 0) - seg.segId = segId - seg.timer = timer - seg.vacuumLen = int64(bufSize) - seg.slotCap = 1 - seg.slotsData = make([]entryPtr, 256*seg.slotCap) - return -} - -func (seg *segment) set(key, value []byte, hashVal uint64, expireSeconds int) (err error) { - if len(key) > 65535 { - return ErrLargeKey - } - maxKeyValLen := len(seg.rb.data)/4 - ENTRY_HDR_SIZE - if len(key)+len(value) > maxKeyValLen { - // Do not accept large entry. - return ErrLargeEntry - } - now := seg.timer.Now() - expireAt := uint32(0) - if expireSeconds > 0 { - expireAt = now + uint32(expireSeconds) - } - - slotId := uint8(hashVal >> 8) - hash16 := uint16(hashVal >> 16) - slot := seg.getSlot(slotId) - idx, match := seg.lookup(slot, hash16, key) - - var hdrBuf [ENTRY_HDR_SIZE]byte - hdr := (*entryHdr)(unsafe.Pointer(&hdrBuf[0])) - if match { - matchedPtr := &slot[idx] - seg.rb.ReadAt(hdrBuf[:], matchedPtr.offset) - hdr.slotId = slotId - hdr.hash16 = hash16 - hdr.keyLen = uint16(len(key)) - originAccessTime := hdr.accessTime - hdr.accessTime = now - hdr.expireAt = expireAt - hdr.valLen = uint32(len(value)) - if hdr.valCap >= hdr.valLen { - //in place overwrite - atomic.AddInt64(&seg.totalTime, int64(hdr.accessTime)-int64(originAccessTime)) - seg.rb.WriteAt(hdrBuf[:], matchedPtr.offset) - seg.rb.WriteAt(value, matchedPtr.offset+ENTRY_HDR_SIZE+int64(hdr.keyLen)) - atomic.AddInt64(&seg.overwrites, 1) - return - } - // avoid unnecessary memory copy. - seg.delEntryPtr(slotId, slot, idx) - match = false - // increase capacity and limit entry len. - for hdr.valCap < hdr.valLen { - hdr.valCap *= 2 - } - if hdr.valCap > uint32(maxKeyValLen-len(key)) { - hdr.valCap = uint32(maxKeyValLen - len(key)) - } - } else { - hdr.slotId = slotId - hdr.hash16 = hash16 - hdr.keyLen = uint16(len(key)) - hdr.accessTime = now - hdr.expireAt = expireAt - hdr.valLen = uint32(len(value)) - hdr.valCap = uint32(len(value)) - if hdr.valCap == 0 { // avoid infinite loop when increasing capacity. - hdr.valCap = 1 - } - } - - entryLen := ENTRY_HDR_SIZE + int64(len(key)) + int64(hdr.valCap) - slotModified := seg.evacuate(entryLen, slotId, now) - if slotModified { - // the slot has been modified during evacuation, we need to looked up for the 'idx' again. - // otherwise there would be index out of bound error. 
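For orientation, the `set` path above splits the same 64-bit xxhash value that selected the segment in `cache.go`: the next byte picks the slot within the segment, and the next 16 bits become the `hash16` tag stored in each `entryPtr`. A minimal sketch of that routing, mirroring the constants shown in this diff:

```go
package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	hashVal := xxhash.Sum64([]byte("user:42"))

	segID := hashVal & 255          // segmentAndOpVal: picks the segment and its lock
	slotID := uint8(hashVal >> 8)   // picks one of the 256 slots inside the segment
	hash16 := uint16(hashVal >> 16) // tag entries are ordered and compared by in the slot

	fmt.Println(segID, slotID, hash16)
}
```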
- slot = seg.getSlot(slotId) - idx, match = seg.lookup(slot, hash16, key) - // assert(match == false) - } - newOff := seg.rb.End() - seg.insertEntryPtr(slotId, hash16, newOff, idx, hdr.keyLen) - seg.rb.Write(hdrBuf[:]) - seg.rb.Write(key) - seg.rb.Write(value) - seg.rb.Skip(int64(hdr.valCap - hdr.valLen)) - atomic.AddInt64(&seg.totalTime, int64(now)) - atomic.AddInt64(&seg.totalCount, 1) - seg.vacuumLen -= entryLen - return -} - -func (seg *segment) touch(key []byte, hashVal uint64, expireSeconds int) (err error) { - if len(key) > 65535 { - return ErrLargeKey - } - - slotId := uint8(hashVal >> 8) - hash16 := uint16(hashVal >> 16) - slot := seg.getSlot(slotId) - idx, match := seg.lookup(slot, hash16, key) - if !match { - err = ErrNotFound - return - } - matchedPtr := &slot[idx] - - var hdrBuf [ENTRY_HDR_SIZE]byte - seg.rb.ReadAt(hdrBuf[:], matchedPtr.offset) - hdr := (*entryHdr)(unsafe.Pointer(&hdrBuf[0])) - - now := seg.timer.Now() - if isExpired(hdr.expireAt, now) { - seg.delEntryPtr(slotId, slot, idx) - atomic.AddInt64(&seg.totalExpired, 1) - err = ErrNotFound - atomic.AddInt64(&seg.missCount, 1) - return - } - - expireAt := uint32(0) - if expireSeconds > 0 { - expireAt = now + uint32(expireSeconds) - } - - originAccessTime := hdr.accessTime - hdr.accessTime = now - hdr.expireAt = expireAt - //in place overwrite - atomic.AddInt64(&seg.totalTime, int64(hdr.accessTime)-int64(originAccessTime)) - seg.rb.WriteAt(hdrBuf[:], matchedPtr.offset) - atomic.AddInt64(&seg.touched, 1) - return -} - -func (seg *segment) evacuate(entryLen int64, slotId uint8, now uint32) (slotModified bool) { - var oldHdrBuf [ENTRY_HDR_SIZE]byte - consecutiveEvacuate := 0 - for seg.vacuumLen < entryLen { - oldOff := seg.rb.End() + seg.vacuumLen - seg.rb.Size() - seg.rb.ReadAt(oldHdrBuf[:], oldOff) - oldHdr := (*entryHdr)(unsafe.Pointer(&oldHdrBuf[0])) - oldEntryLen := ENTRY_HDR_SIZE + int64(oldHdr.keyLen) + int64(oldHdr.valCap) - if oldHdr.deleted { - consecutiveEvacuate = 0 - atomic.AddInt64(&seg.totalTime, -int64(oldHdr.accessTime)) - atomic.AddInt64(&seg.totalCount, -1) - seg.vacuumLen += oldEntryLen - continue - } - expired := isExpired(oldHdr.expireAt, now) - leastRecentUsed := int64(oldHdr.accessTime)*atomic.LoadInt64(&seg.totalCount) <= atomic.LoadInt64(&seg.totalTime) - if expired || leastRecentUsed || consecutiveEvacuate > 5 { - seg.delEntryPtrByOffset(oldHdr.slotId, oldHdr.hash16, oldOff) - if oldHdr.slotId == slotId { - slotModified = true - } - consecutiveEvacuate = 0 - atomic.AddInt64(&seg.totalTime, -int64(oldHdr.accessTime)) - atomic.AddInt64(&seg.totalCount, -1) - seg.vacuumLen += oldEntryLen - if expired { - atomic.AddInt64(&seg.totalExpired, 1) - } else { - atomic.AddInt64(&seg.totalEvacuate, 1) - } - } else { - // evacuate an old entry that has been accessed recently for better cache hit rate. 
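The loop above is the heart of freecache's "nearly LRU" policy: the oldest ring-buffer entry is reclaimed when it is deleted or expired, when its access time is at or below the segment's running average, or when five entries in a row have already been copied forward; otherwise it is evacuated (copied to the head of the ring) so a recently used entry survives. A simplified, illustrative restatement of that decision follows; the `hdr` struct here is a stand-in for the internal `entryHdr`, and the real code reclaims deleted entries before the other checks:

```go
// Simplified stand-in for the internal entryHdr fields used by evacuate.
type hdr struct {
	accessTime uint32
	expireAt   uint32
	deleted    bool
}

// shouldEvict mirrors the condition in segment.evacuate: reclaim the tail
// entry instead of copying it forward when any of these hold.
func shouldEvict(h hdr, now uint32, totalCount, totalTime int64, consecutiveEvacuate int) bool {
	expired := h.expireAt != 0 && h.expireAt <= now
	// "Nearly LRU": an access time at or below the running average
	// (totalTime / totalCount) marks the entry as least recently used.
	leastRecentlyUsed := int64(h.accessTime)*totalCount <= totalTime
	return h.deleted || expired || leastRecentlyUsed || consecutiveEvacuate > 5
}
```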
- newOff := seg.rb.Evacuate(oldOff, int(oldEntryLen)) - seg.updateEntryPtr(oldHdr.slotId, oldHdr.hash16, oldOff, newOff) - consecutiveEvacuate++ - atomic.AddInt64(&seg.totalEvacuate, 1) - } - } - return -} - -func (seg *segment) get(key, buf []byte, hashVal uint64, peek bool) (value []byte, expireAt uint32, err error) { - hdr, ptr, err := seg.locate(key, hashVal, peek) - if err != nil { - return - } - expireAt = hdr.expireAt - if cap(buf) >= int(hdr.valLen) { - value = buf[:hdr.valLen] - } else { - value = make([]byte, hdr.valLen) - } - - seg.rb.ReadAt(value, ptr.offset+ENTRY_HDR_SIZE+int64(hdr.keyLen)) - if !peek { - atomic.AddInt64(&seg.hitCount, 1) - } - return -} - -// view provides zero-copy access to the element's value, without copying to -// an intermediate buffer. -func (seg *segment) view(key []byte, fn func([]byte) error, hashVal uint64, peek bool) (err error) { - hdr, ptr, err := seg.locate(key, hashVal, peek) - if err != nil { - return - } - start := ptr.offset + ENTRY_HDR_SIZE + int64(hdr.keyLen) - val, err := seg.rb.Slice(start, int64(hdr.valLen)) - if err != nil { - return err - } - err = fn(val) - if !peek { - atomic.AddInt64(&seg.hitCount, 1) - } - return -} - -func (seg *segment) locate(key []byte, hashVal uint64, peek bool) (hdr *entryHdr, ptr *entryPtr, err error) { - slotId := uint8(hashVal >> 8) - hash16 := uint16(hashVal >> 16) - slot := seg.getSlot(slotId) - idx, match := seg.lookup(slot, hash16, key) - if !match { - err = ErrNotFound - if !peek { - atomic.AddInt64(&seg.missCount, 1) - } - return - } - ptr = &slot[idx] - - var hdrBuf [ENTRY_HDR_SIZE]byte - seg.rb.ReadAt(hdrBuf[:], ptr.offset) - hdr = (*entryHdr)(unsafe.Pointer(&hdrBuf[0])) - if !peek { - now := seg.timer.Now() - if isExpired(hdr.expireAt, now) { - seg.delEntryPtr(slotId, slot, idx) - atomic.AddInt64(&seg.totalExpired, 1) - err = ErrNotFound - atomic.AddInt64(&seg.missCount, 1) - return - } - atomic.AddInt64(&seg.totalTime, int64(now-hdr.accessTime)) - hdr.accessTime = now - seg.rb.WriteAt(hdrBuf[:], ptr.offset) - } - return hdr, ptr, err -} - -func (seg *segment) del(key []byte, hashVal uint64) (affected bool) { - slotId := uint8(hashVal >> 8) - hash16 := uint16(hashVal >> 16) - slot := seg.getSlot(slotId) - idx, match := seg.lookup(slot, hash16, key) - if !match { - return false - } - seg.delEntryPtr(slotId, slot, idx) - return true -} - -func (seg *segment) ttl(key []byte, hashVal uint64) (timeLeft uint32, err error) { - slotId := uint8(hashVal >> 8) - hash16 := uint16(hashVal >> 16) - slot := seg.getSlot(slotId) - idx, match := seg.lookup(slot, hash16, key) - if !match { - err = ErrNotFound - return - } - ptr := &slot[idx] - - var hdrBuf [ENTRY_HDR_SIZE]byte - seg.rb.ReadAt(hdrBuf[:], ptr.offset) - hdr := (*entryHdr)(unsafe.Pointer(&hdrBuf[0])) - - if hdr.expireAt == 0 { - return - } else { - now := seg.timer.Now() - if !isExpired(hdr.expireAt, now) { - timeLeft = hdr.expireAt - now - return - } - } - err = ErrNotFound - return -} - -func (seg *segment) expand() { - newSlotData := make([]entryPtr, seg.slotCap*2*256) - for i := 0; i < 256; i++ { - off := int32(i) * seg.slotCap - copy(newSlotData[off*2:], seg.slotsData[off:off+seg.slotLens[i]]) - } - seg.slotCap *= 2 - seg.slotsData = newSlotData -} - -func (seg *segment) updateEntryPtr(slotId uint8, hash16 uint16, oldOff, newOff int64) { - slot := seg.getSlot(slotId) - idx, match := seg.lookupByOff(slot, hash16, oldOff) - if !match { - return - } - ptr := &slot[idx] - ptr.offset = newOff -} - -func (seg *segment) insertEntryPtr(slotId uint8, 
hash16 uint16, offset int64, idx int, keyLen uint16) { - if seg.slotLens[slotId] == seg.slotCap { - seg.expand() - } - seg.slotLens[slotId]++ - atomic.AddInt64(&seg.entryCount, 1) - slot := seg.getSlot(slotId) - copy(slot[idx+1:], slot[idx:]) - slot[idx].offset = offset - slot[idx].hash16 = hash16 - slot[idx].keyLen = keyLen -} - -func (seg *segment) delEntryPtrByOffset(slotId uint8, hash16 uint16, offset int64) { - slot := seg.getSlot(slotId) - idx, match := seg.lookupByOff(slot, hash16, offset) - if !match { - return - } - seg.delEntryPtr(slotId, slot, idx) -} - -func (seg *segment) delEntryPtr(slotId uint8, slot []entryPtr, idx int) { - offset := slot[idx].offset - var entryHdrBuf [ENTRY_HDR_SIZE]byte - seg.rb.ReadAt(entryHdrBuf[:], offset) - entryHdr := (*entryHdr)(unsafe.Pointer(&entryHdrBuf[0])) - entryHdr.deleted = true - seg.rb.WriteAt(entryHdrBuf[:], offset) - copy(slot[idx:], slot[idx+1:]) - seg.slotLens[slotId]-- - atomic.AddInt64(&seg.entryCount, -1) -} - -func entryPtrIdx(slot []entryPtr, hash16 uint16) (idx int) { - high := len(slot) - for idx < high { - mid := (idx + high) >> 1 - oldEntry := &slot[mid] - if oldEntry.hash16 < hash16 { - idx = mid + 1 - } else { - high = mid - } - } - return -} - -func (seg *segment) lookup(slot []entryPtr, hash16 uint16, key []byte) (idx int, match bool) { - idx = entryPtrIdx(slot, hash16) - for idx < len(slot) { - ptr := &slot[idx] - if ptr.hash16 != hash16 { - break - } - match = int(ptr.keyLen) == len(key) && seg.rb.EqualAt(key, ptr.offset+ENTRY_HDR_SIZE) - if match { - return - } - idx++ - } - return -} - -func (seg *segment) lookupByOff(slot []entryPtr, hash16 uint16, offset int64) (idx int, match bool) { - idx = entryPtrIdx(slot, hash16) - for idx < len(slot) { - ptr := &slot[idx] - if ptr.hash16 != hash16 { - break - } - match = ptr.offset == offset - if match { - return - } - idx++ - } - return -} - -func (seg *segment) resetStatistics() { - atomic.StoreInt64(&seg.totalEvacuate, 0) - atomic.StoreInt64(&seg.totalExpired, 0) - atomic.StoreInt64(&seg.overwrites, 0) - atomic.StoreInt64(&seg.hitCount, 0) - atomic.StoreInt64(&seg.missCount, 0) -} - -func (seg *segment) clear() { - bufSize := len(seg.rb.data) - seg.rb.Reset(0) - seg.vacuumLen = int64(bufSize) - seg.slotCap = 1 - seg.slotsData = make([]entryPtr, 256*seg.slotCap) - for i := 0; i < len(seg.slotLens); i++ { - seg.slotLens[i] = 0 - } - - atomic.StoreInt64(&seg.hitCount, 0) - atomic.StoreInt64(&seg.missCount, 0) - atomic.StoreInt64(&seg.entryCount, 0) - atomic.StoreInt64(&seg.totalCount, 0) - atomic.StoreInt64(&seg.totalTime, 0) - atomic.StoreInt64(&seg.totalEvacuate, 0) - atomic.StoreInt64(&seg.totalExpired, 0) - atomic.StoreInt64(&seg.overwrites, 0) -} - -func (seg *segment) getSlot(slotId uint8) []entryPtr { - slotOff := int32(slotId) * seg.slotCap - return seg.slotsData[slotOff : slotOff+seg.slotLens[slotId] : slotOff+seg.slotCap] -} - -// isExpired checks if a key is expired. -func isExpired(keyExpireAt, now uint32) bool { - return keyExpireAt != 0 && keyExpireAt <= now -} diff --git a/vendor/github.com/coocood/freecache/timer.go b/vendor/github.com/coocood/freecache/timer.go deleted file mode 100644 index d4d032c..0000000 --- a/vendor/github.com/coocood/freecache/timer.go +++ /dev/null @@ -1,79 +0,0 @@ -package freecache - -import ( - "sync/atomic" - "time" -) - -// Timer holds representation of current time. -type Timer interface { - // Give current time (in seconds) - Now() uint32 -} - -// Timer that must be stopped. 
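The `Timer` interface above makes the cache's clock pluggable (see `NewCacheCustomTimer` earlier in this file set). The cached timer defined just below refreshes an atomically stored Unix second once per tick instead of calling `time.Now` on every operation. A hedged wiring sketch, assuming the `NewCachedTimer` constructor that follows:

```go
package main

import (
	"github.com/coocood/freecache"
)

func main() {
	// The cached timer keeps time lookups off the hot Set/Get path.
	timer := freecache.NewCachedTimer()
	defer timer.Stop()

	cache := freecache.NewCacheCustomTimer(256*1024*1024, timer)
	_ = cache.Set([]byte("k"), []byte("v"), 10)
}
```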
-type StoppableTimer interface { - Timer - - // Release resources of the timer, functionality may or may not be affected - // It is not called automatically, so user must call it just once - Stop() -} - -// Helper function that returns Unix time in seconds -func getUnixTime() uint32 { - return uint32(time.Now().Unix()) -} - -// Default timer reads Unix time always when requested -type defaultTimer struct{} - -func (timer defaultTimer) Now() uint32 { - return getUnixTime() -} - -// Cached timer stores Unix time every second and returns the cached value -type cachedTimer struct { - now uint32 - ticker *time.Ticker - done chan bool -} - -// Create cached timer and start runtime timer that updates time every second -func NewCachedTimer() StoppableTimer { - timer := &cachedTimer{ - now: getUnixTime(), - ticker: time.NewTicker(time.Second), - done: make(chan bool), - } - - go timer.update() - - return timer -} - -func (timer *cachedTimer) Now() uint32 { - return atomic.LoadUint32(&timer.now) -} - -// Stop runtime timer and finish routine that updates time -func (timer *cachedTimer) Stop() { - timer.ticker.Stop() - timer.done <- true - close(timer.done) - - timer.done = nil - timer.ticker = nil -} - -// Periodically check and update of time -func (timer *cachedTimer) update() { - for { - select { - case <-timer.done: - return - case <-timer.ticker.C: - atomic.StoreUint32(&timer.now, getUnixTime()) - } - } -} diff --git a/vendor/github.com/davecgh/go-spew/LICENSE b/vendor/github.com/davecgh/go-spew/LICENSE deleted file mode 100644 index bc52e96..0000000 --- a/vendor/github.com/davecgh/go-spew/LICENSE +++ /dev/null @@ -1,15 +0,0 @@ -ISC License - -Copyright (c) 2012-2016 Dave Collins - -Permission to use, copy, modify, and/or distribute this software for any -purpose with or without fee is hereby granted, provided that the above -copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. diff --git a/vendor/github.com/davecgh/go-spew/spew/bypass.go b/vendor/github.com/davecgh/go-spew/spew/bypass.go deleted file mode 100644 index 7929947..0000000 --- a/vendor/github.com/davecgh/go-spew/spew/bypass.go +++ /dev/null @@ -1,145 +0,0 @@ -// Copyright (c) 2015-2016 Dave Collins -// -// Permission to use, copy, modify, and distribute this software for any -// purpose with or without fee is hereby granted, provided that the above -// copyright notice and this permission notice appear in all copies. -// -// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
- -// NOTE: Due to the following build constraints, this file will only be compiled -// when the code is not running on Google App Engine, compiled by GopherJS, and -// "-tags safe" is not added to the go build command line. The "disableunsafe" -// tag is deprecated and thus should not be used. -// Go versions prior to 1.4 are disabled because they use a different layout -// for interfaces which make the implementation of unsafeReflectValue more complex. -// +build !js,!appengine,!safe,!disableunsafe,go1.4 - -package spew - -import ( - "reflect" - "unsafe" -) - -const ( - // UnsafeDisabled is a build-time constant which specifies whether or - // not access to the unsafe package is available. - UnsafeDisabled = false - - // ptrSize is the size of a pointer on the current arch. - ptrSize = unsafe.Sizeof((*byte)(nil)) -) - -type flag uintptr - -var ( - // flagRO indicates whether the value field of a reflect.Value - // is read-only. - flagRO flag - - // flagAddr indicates whether the address of the reflect.Value's - // value may be taken. - flagAddr flag -) - -// flagKindMask holds the bits that make up the kind -// part of the flags field. In all the supported versions, -// it is in the lower 5 bits. -const flagKindMask = flag(0x1f) - -// Different versions of Go have used different -// bit layouts for the flags type. This table -// records the known combinations. -var okFlags = []struct { - ro, addr flag -}{{ - // From Go 1.4 to 1.5 - ro: 1 << 5, - addr: 1 << 7, -}, { - // Up to Go tip. - ro: 1<<5 | 1<<6, - addr: 1 << 8, -}} - -var flagValOffset = func() uintptr { - field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag") - if !ok { - panic("reflect.Value has no flag field") - } - return field.Offset -}() - -// flagField returns a pointer to the flag field of a reflect.Value. -func flagField(v *reflect.Value) *flag { - return (*flag)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + flagValOffset)) -} - -// unsafeReflectValue converts the passed reflect.Value into a one that bypasses -// the typical safety restrictions preventing access to unaddressable and -// unexported data. It works by digging the raw pointer to the underlying -// value out of the protected value and generating a new unprotected (unsafe) -// reflect.Value to it. -// -// This allows us to check for implementations of the Stringer and error -// interfaces to be used for pretty printing ordinarily unaddressable and -// inaccessible values such as unexported struct fields. -func unsafeReflectValue(v reflect.Value) reflect.Value { - if !v.IsValid() || (v.CanInterface() && v.CanAddr()) { - return v - } - flagFieldPtr := flagField(&v) - *flagFieldPtr &^= flagRO - *flagFieldPtr |= flagAddr - return v -} - -// Sanity checks against future reflect package changes -// to the type or semantics of the Value.flag field. -func init() { - field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag") - if !ok { - panic("reflect.Value has no flag field") - } - if field.Type.Kind() != reflect.TypeOf(flag(0)).Kind() { - panic("reflect.Value flag field has changed kind") - } - type t0 int - var t struct { - A t0 - // t0 will have flagEmbedRO set. - t0 - // a will have flagStickyRO set - a t0 - } - vA := reflect.ValueOf(t).FieldByName("A") - va := reflect.ValueOf(t).FieldByName("a") - vt0 := reflect.ValueOf(t).FieldByName("t0") - - // Infer flagRO from the difference between the flags - // for the (otherwise identical) fields in t. 
- flagPublic := *flagField(&vA) - flagWithRO := *flagField(&va) | *flagField(&vt0) - flagRO = flagPublic ^ flagWithRO - - // Infer flagAddr from the difference between a value - // taken from a pointer and not. - vPtrA := reflect.ValueOf(&t).Elem().FieldByName("A") - flagNoPtr := *flagField(&vA) - flagPtr := *flagField(&vPtrA) - flagAddr = flagNoPtr ^ flagPtr - - // Check that the inferred flags tally with one of the known versions. - for _, f := range okFlags { - if flagRO == f.ro && flagAddr == f.addr { - return - } - } - panic("reflect.Value read-only flag has changed semantics") -} diff --git a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go deleted file mode 100644 index 205c28d..0000000 --- a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright (c) 2015-2016 Dave Collins -// -// Permission to use, copy, modify, and distribute this software for any -// purpose with or without fee is hereby granted, provided that the above -// copyright notice and this permission notice appear in all copies. -// -// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -// NOTE: Due to the following build constraints, this file will only be compiled -// when the code is running on Google App Engine, compiled by GopherJS, or -// "-tags safe" is added to the go build command line. The "disableunsafe" -// tag is deprecated and thus should not be used. -// +build js appengine safe disableunsafe !go1.4 - -package spew - -import "reflect" - -const ( - // UnsafeDisabled is a build-time constant which specifies whether or - // not access to the unsafe package is available. - UnsafeDisabled = true -) - -// unsafeReflectValue typically converts the passed reflect.Value into a one -// that bypasses the typical safety restrictions preventing access to -// unaddressable and unexported data. However, doing this relies on access to -// the unsafe package. This is a stub version which simply returns the passed -// reflect.Value when the unsafe package is not available. -func unsafeReflectValue(v reflect.Value) reflect.Value { - return v -} diff --git a/vendor/github.com/davecgh/go-spew/spew/common.go b/vendor/github.com/davecgh/go-spew/spew/common.go deleted file mode 100644 index 1be8ce9..0000000 --- a/vendor/github.com/davecgh/go-spew/spew/common.go +++ /dev/null @@ -1,341 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew - -import ( - "bytes" - "fmt" - "io" - "reflect" - "sort" - "strconv" -) - -// Some constants in the form of bytes to avoid string overhead. This mirrors -// the technique used in the fmt package. -var ( - panicBytes = []byte("(PANIC=") - plusBytes = []byte("+") - iBytes = []byte("i") - trueBytes = []byte("true") - falseBytes = []byte("false") - interfaceBytes = []byte("(interface {})") - commaNewlineBytes = []byte(",\n") - newlineBytes = []byte("\n") - openBraceBytes = []byte("{") - openBraceNewlineBytes = []byte("{\n") - closeBraceBytes = []byte("}") - asteriskBytes = []byte("*") - colonBytes = []byte(":") - colonSpaceBytes = []byte(": ") - openParenBytes = []byte("(") - closeParenBytes = []byte(")") - spaceBytes = []byte(" ") - pointerChainBytes = []byte("->") - nilAngleBytes = []byte("") - maxNewlineBytes = []byte("\n") - maxShortBytes = []byte("") - circularBytes = []byte("") - circularShortBytes = []byte("") - invalidAngleBytes = []byte("") - openBracketBytes = []byte("[") - closeBracketBytes = []byte("]") - percentBytes = []byte("%") - precisionBytes = []byte(".") - openAngleBytes = []byte("<") - closeAngleBytes = []byte(">") - openMapBytes = []byte("map[") - closeMapBytes = []byte("]") - lenEqualsBytes = []byte("len=") - capEqualsBytes = []byte("cap=") -) - -// hexDigits is used to map a decimal value to a hex digit. -var hexDigits = "0123456789abcdef" - -// catchPanic handles any panics that might occur during the handleMethods -// calls. -func catchPanic(w io.Writer, v reflect.Value) { - if err := recover(); err != nil { - w.Write(panicBytes) - fmt.Fprintf(w, "%v", err) - w.Write(closeParenBytes) - } -} - -// handleMethods attempts to call the Error and String methods on the underlying -// type the passed reflect.Value represents and outputes the result to Writer w. -// -// It handles panics in any called methods by catching and displaying the error -// as the formatted value. -func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) { - // We need an interface to check if the type implements the error or - // Stringer interface. However, the reflect package won't give us an - // interface on certain things like unexported struct fields in order - // to enforce visibility rules. We use unsafe, when it's available, - // to bypass these restrictions since this package does not mutate the - // values. - if !v.CanInterface() { - if UnsafeDisabled { - return false - } - - v = unsafeReflectValue(v) - } - - // Choose whether or not to do error and Stringer interface lookups against - // the base type or a pointer to the base type depending on settings. - // Technically calling one of these methods with a pointer receiver can - // mutate the value, however, types which choose to satisify an error or - // Stringer interface with a pointer receiver should not be mutating their - // state inside these interface methods. - if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() { - v = unsafeReflectValue(v) - } - if v.CanAddr() { - v = v.Addr() - } - - // Is it an error or Stringer? 
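The `handleMethods` helper being deleted in this hunk is what let spew invoke `String()`/`Error()` even on values that reflect normally hides, by falling back to the unsafe bypass above. A small illustrative sketch; the `account` and `secretID` types are invented for the example:

```go
package main

import "github.com/davecgh/go-spew/spew"

// secretID is a hypothetical type whose Stringer masks its value.
type secretID int

func (s secretID) String() string { return "id-****" }

// account has an unexported field, which reflect refuses to Interface().
type account struct {
	Name string
	id   secretID
}

func main() {
	// With the unsafe bypass available, handleMethods can still call
	// String() on the unexported field, so Dump shows "id-****" rather
	// than the raw integer.
	spew.Dump(account{Name: "demo", id: 42})
}
```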
- switch iface := v.Interface().(type) { - case error: - defer catchPanic(w, v) - if cs.ContinueOnMethod { - w.Write(openParenBytes) - w.Write([]byte(iface.Error())) - w.Write(closeParenBytes) - w.Write(spaceBytes) - return false - } - - w.Write([]byte(iface.Error())) - return true - - case fmt.Stringer: - defer catchPanic(w, v) - if cs.ContinueOnMethod { - w.Write(openParenBytes) - w.Write([]byte(iface.String())) - w.Write(closeParenBytes) - w.Write(spaceBytes) - return false - } - w.Write([]byte(iface.String())) - return true - } - return false -} - -// printBool outputs a boolean value as true or false to Writer w. -func printBool(w io.Writer, val bool) { - if val { - w.Write(trueBytes) - } else { - w.Write(falseBytes) - } -} - -// printInt outputs a signed integer value to Writer w. -func printInt(w io.Writer, val int64, base int) { - w.Write([]byte(strconv.FormatInt(val, base))) -} - -// printUint outputs an unsigned integer value to Writer w. -func printUint(w io.Writer, val uint64, base int) { - w.Write([]byte(strconv.FormatUint(val, base))) -} - -// printFloat outputs a floating point value using the specified precision, -// which is expected to be 32 or 64bit, to Writer w. -func printFloat(w io.Writer, val float64, precision int) { - w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision))) -} - -// printComplex outputs a complex value using the specified float precision -// for the real and imaginary parts to Writer w. -func printComplex(w io.Writer, c complex128, floatPrecision int) { - r := real(c) - w.Write(openParenBytes) - w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision))) - i := imag(c) - if i >= 0 { - w.Write(plusBytes) - } - w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision))) - w.Write(iBytes) - w.Write(closeParenBytes) -} - -// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x' -// prefix to Writer w. -func printHexPtr(w io.Writer, p uintptr) { - // Null pointer. - num := uint64(p) - if num == 0 { - w.Write(nilAngleBytes) - return - } - - // Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix - buf := make([]byte, 18) - - // It's simpler to construct the hex string right to left. - base := uint64(16) - i := len(buf) - 1 - for num >= base { - buf[i] = hexDigits[num%base] - num /= base - i-- - } - buf[i] = hexDigits[num] - - // Add '0x' prefix. - i-- - buf[i] = 'x' - i-- - buf[i] = '0' - - // Strip unused leading bytes. - buf = buf[i:] - w.Write(buf) -} - -// valuesSorter implements sort.Interface to allow a slice of reflect.Value -// elements to be sorted. -type valuesSorter struct { - values []reflect.Value - strings []string // either nil or same len and values - cs *ConfigState -} - -// newValuesSorter initializes a valuesSorter instance, which holds a set of -// surrogate keys on which the data should be sorted. It uses flags in -// ConfigState to decide if and how to populate those surrogate keys. 
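The `defer catchPanic(...)` calls in the hunk above are why spew survives Stringer implementations that blow up: the panic is recovered and rendered inline rather than killing the caller. A hedged sketch with a made-up `badStringer` type:

```go
package main

import (
	"fmt"

	"github.com/davecgh/go-spew/spew"
)

// badStringer is a hypothetical type whose String method always panics.
type badStringer struct{}

func (badStringer) String() string { panic("boom") }

func main() {
	// Instead of crashing, the recovered panic is printed inline,
	// roughly as "(main.badStringer) (PANIC=boom)".
	fmt.Println(spew.Sdump(badStringer{}))
}
```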
-func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface { - vs := &valuesSorter{values: values, cs: cs} - if canSortSimply(vs.values[0].Kind()) { - return vs - } - if !cs.DisableMethods { - vs.strings = make([]string, len(values)) - for i := range vs.values { - b := bytes.Buffer{} - if !handleMethods(cs, &b, vs.values[i]) { - vs.strings = nil - break - } - vs.strings[i] = b.String() - } - } - if vs.strings == nil && cs.SpewKeys { - vs.strings = make([]string, len(values)) - for i := range vs.values { - vs.strings[i] = Sprintf("%#v", vs.values[i].Interface()) - } - } - return vs -} - -// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted -// directly, or whether it should be considered for sorting by surrogate keys -// (if the ConfigState allows it). -func canSortSimply(kind reflect.Kind) bool { - // This switch parallels valueSortLess, except for the default case. - switch kind { - case reflect.Bool: - return true - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: - return true - case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: - return true - case reflect.Float32, reflect.Float64: - return true - case reflect.String: - return true - case reflect.Uintptr: - return true - case reflect.Array: - return true - } - return false -} - -// Len returns the number of values in the slice. It is part of the -// sort.Interface implementation. -func (s *valuesSorter) Len() int { - return len(s.values) -} - -// Swap swaps the values at the passed indices. It is part of the -// sort.Interface implementation. -func (s *valuesSorter) Swap(i, j int) { - s.values[i], s.values[j] = s.values[j], s.values[i] - if s.strings != nil { - s.strings[i], s.strings[j] = s.strings[j], s.strings[i] - } -} - -// valueSortLess returns whether the first value should sort before the second -// value. It is used by valueSorter.Less as part of the sort.Interface -// implementation. -func valueSortLess(a, b reflect.Value) bool { - switch a.Kind() { - case reflect.Bool: - return !a.Bool() && b.Bool() - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: - return a.Int() < b.Int() - case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: - return a.Uint() < b.Uint() - case reflect.Float32, reflect.Float64: - return a.Float() < b.Float() - case reflect.String: - return a.String() < b.String() - case reflect.Uintptr: - return a.Uint() < b.Uint() - case reflect.Array: - // Compare the contents of both arrays. - l := a.Len() - for i := 0; i < l; i++ { - av := a.Index(i) - bv := b.Index(i) - if av.Interface() == bv.Interface() { - continue - } - return valueSortLess(av, bv) - } - } - return a.String() < b.String() -} - -// Less returns whether the value at index i should sort before the -// value at index j. It is part of the sort.Interface implementation. -func (s *valuesSorter) Less(i, j int) bool { - if s.strings == nil { - return valueSortLess(s.values[i], s.values[j]) - } - return s.strings[i] < s.strings[j] -} - -// sortValues is a sort function that handles both native types and any type that -// can be converted to error or Stringer. Other inputs are sorted according to -// their Value.String() value to ensure display stability. 
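`valuesSorter` and `valueSortLess` above exist solely to give map dumps a stable key order. For anyone reviewing this removal who has not used the knob, a minimal sketch of switching it on (the map contents are arbitrary):

```go
package main

import "github.com/davecgh/go-spew/spew"

func main() {
	m := map[string]int{"b": 2, "a": 1, "c": 3}

	// SortKeys routes map keys through the valuesSorter removed above, so
	// repeated dumps of the same map are byte-for-byte identical and diffable.
	cfg := spew.ConfigState{Indent: " ", SortKeys: true}
	cfg.Dump(m)
}
```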
-func sortValues(values []reflect.Value, cs *ConfigState) { - if len(values) == 0 { - return - } - sort.Sort(newValuesSorter(values, cs)) -} diff --git a/vendor/github.com/davecgh/go-spew/spew/config.go b/vendor/github.com/davecgh/go-spew/spew/config.go deleted file mode 100644 index 2e3d22f..0000000 --- a/vendor/github.com/davecgh/go-spew/spew/config.go +++ /dev/null @@ -1,306 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew - -import ( - "bytes" - "fmt" - "io" - "os" -) - -// ConfigState houses the configuration options used by spew to format and -// display values. There is a global instance, Config, that is used to control -// all top-level Formatter and Dump functionality. Each ConfigState instance -// provides methods equivalent to the top-level functions. -// -// The zero value for ConfigState provides no indentation. You would typically -// want to set it to a space or a tab. -// -// Alternatively, you can use NewDefaultConfig to get a ConfigState instance -// with default settings. See the documentation of NewDefaultConfig for default -// values. -type ConfigState struct { - // Indent specifies the string to use for each indentation level. The - // global config instance that all top-level functions use set this to a - // single space by default. If you would like more indentation, you might - // set this to a tab with "\t" or perhaps two spaces with " ". - Indent string - - // MaxDepth controls the maximum number of levels to descend into nested - // data structures. The default, 0, means there is no limit. - // - // NOTE: Circular data structures are properly detected, so it is not - // necessary to set this value unless you specifically want to limit deeply - // nested data structures. - MaxDepth int - - // DisableMethods specifies whether or not error and Stringer interfaces are - // invoked for types that implement them. - DisableMethods bool - - // DisablePointerMethods specifies whether or not to check for and invoke - // error and Stringer interfaces on types which only accept a pointer - // receiver when the current type is not a pointer. - // - // NOTE: This might be an unsafe action since calling one of these methods - // with a pointer receiver could technically mutate the value, however, - // in practice, types which choose to satisify an error or Stringer - // interface with a pointer receiver should not be mutating their state - // inside these interface methods. As a result, this option relies on - // access to the unsafe package, so it will not have any effect when - // running in environments without access to the unsafe package such as - // Google App Engine or with the "safe" build tag specified. 
- DisablePointerMethods bool - - // DisablePointerAddresses specifies whether to disable the printing of - // pointer addresses. This is useful when diffing data structures in tests. - DisablePointerAddresses bool - - // DisableCapacities specifies whether to disable the printing of capacities - // for arrays, slices, maps and channels. This is useful when diffing - // data structures in tests. - DisableCapacities bool - - // ContinueOnMethod specifies whether or not recursion should continue once - // a custom error or Stringer interface is invoked. The default, false, - // means it will print the results of invoking the custom error or Stringer - // interface and return immediately instead of continuing to recurse into - // the internals of the data type. - // - // NOTE: This flag does not have any effect if method invocation is disabled - // via the DisableMethods or DisablePointerMethods options. - ContinueOnMethod bool - - // SortKeys specifies map keys should be sorted before being printed. Use - // this to have a more deterministic, diffable output. Note that only - // native types (bool, int, uint, floats, uintptr and string) and types - // that support the error or Stringer interfaces (if methods are - // enabled) are supported, with other types sorted according to the - // reflect.Value.String() output which guarantees display stability. - SortKeys bool - - // SpewKeys specifies that, as a last resort attempt, map keys should - // be spewed to strings and sorted by those strings. This is only - // considered if SortKeys is true. - SpewKeys bool -} - -// Config is the active configuration of the top-level functions. -// The configuration can be changed by modifying the contents of spew.Config. -var Config = ConfigState{Indent: " "} - -// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the formatted string as a value that satisfies error. See NewFormatter -// for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) { - return fmt.Errorf(format, c.convertArgs(a)...) -} - -// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) { - return fmt.Fprint(w, c.convertArgs(a)...) -} - -// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { - return fmt.Fprintf(w, format, c.convertArgs(a)...) -} - -// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it -// passed with a Formatter interface returned by c.NewFormatter. See -// NewFormatter for formatting details. 
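Each of the ConfigState wrappers deleted in this file simply funnels its arguments through `c.NewFormatter` before delegating to the matching `fmt` function. A short sketch of the intended call pattern; the `point` type is invented for the example:

```go
package main

import (
	"os"

	"github.com/davecgh/go-spew/spew"
)

// point is a hypothetical struct used only to have something to format.
type point struct{ X, Y int }

func main() {
	cfg := spew.NewDefaultConfig()

	// Arguments are wrapped by cfg.NewFormatter, so %#+v gains spew's type
	// annotations and pointer following on top of the usual fmt behaviour.
	cfg.Printf("point: %#+v\n", &point{X: 1, Y: 2})
	cfg.Fprintf(os.Stderr, "point: %v\n", point{X: 3, Y: 4})
}
```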
-// -// This function is shorthand for the following syntax: -// -// fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) { - return fmt.Fprintln(w, c.convertArgs(a)...) -} - -// Print is a wrapper for fmt.Print that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Print(c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Print(a ...interface{}) (n int, err error) { - return fmt.Print(c.convertArgs(a)...) -} - -// Printf is a wrapper for fmt.Printf that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) { - return fmt.Printf(format, c.convertArgs(a)...) -} - -// Println is a wrapper for fmt.Println that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Println(c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Println(a ...interface{}) (n int, err error) { - return fmt.Println(c.convertArgs(a)...) -} - -// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Sprint(a ...interface{}) string { - return fmt.Sprint(c.convertArgs(a)...) -} - -// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Sprintf(format string, a ...interface{}) string { - return fmt.Sprintf(format, c.convertArgs(a)...) -} - -// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it -// were passed with a Formatter interface returned by c.NewFormatter. It -// returns the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Sprintln(a ...interface{}) string { - return fmt.Sprintln(c.convertArgs(a)...) -} - -/* -NewFormatter returns a custom formatter that satisfies the fmt.Formatter -interface. As a result, it integrates cleanly with standard fmt package -printing functions. The formatter is useful for inline printing of smaller data -types similar to the standard %v format specifier. 
- -The custom formatter only responds to the %v (most compact), %+v (adds pointer -addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb -combinations. Any other verbs such as %x and %q will be sent to the the -standard fmt package for formatting. In addition, the custom formatter ignores -the width and precision arguments (however they will still work on the format -specifiers not handled by the custom formatter). - -Typically this function shouldn't be called directly. It is much easier to make -use of the custom formatter by calling one of the convenience functions such as -c.Printf, c.Println, or c.Printf. -*/ -func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter { - return newFormatter(c, v) -} - -// Fdump formats and displays the passed arguments to io.Writer w. It formats -// exactly the same as Dump. -func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) { - fdump(c, w, a...) -} - -/* -Dump displays the passed parameters to standard out with newlines, customizable -indentation, and additional debug information such as complete types and all -pointer addresses used to indirect to the final value. It provides the -following features over the built-in printing facilities provided by the fmt -package: - - * Pointers are dereferenced and followed - * Circular data structures are detected and handled properly - * Custom Stringer/error interfaces are optionally invoked, including - on unexported types - * Custom types which only implement the Stringer/error interfaces via - a pointer receiver are optionally invoked when passing non-pointer - variables - * Byte arrays and slices are dumped like the hexdump -C command which - includes offsets, byte values in hex, and ASCII output - -The configuration options are controlled by modifying the public members -of c. See ConfigState for options documentation. - -See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to -get the formatted result as a string. -*/ -func (c *ConfigState) Dump(a ...interface{}) { - fdump(c, os.Stdout, a...) -} - -// Sdump returns a string with the passed arguments formatted exactly the same -// as Dump. -func (c *ConfigState) Sdump(a ...interface{}) string { - var buf bytes.Buffer - fdump(c, &buf, a...) - return buf.String() -} - -// convertArgs accepts a slice of arguments and returns a slice of the same -// length with each argument converted to a spew Formatter interface using -// the ConfigState associated with s. -func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) { - formatters = make([]interface{}, len(args)) - for index, arg := range args { - formatters[index] = newFormatter(c, arg) - } - return formatters -} - -// NewDefaultConfig returns a ConfigState with the following default settings. -// -// Indent: " " -// MaxDepth: 0 -// DisableMethods: false -// DisablePointerMethods: false -// ContinueOnMethod: false -// SortKeys: false -func NewDefaultConfig() *ConfigState { - return &ConfigState{Indent: " "} -} diff --git a/vendor/github.com/davecgh/go-spew/spew/doc.go b/vendor/github.com/davecgh/go-spew/spew/doc.go deleted file mode 100644 index aacaac6..0000000 --- a/vendor/github.com/davecgh/go-spew/spew/doc.go +++ /dev/null @@ -1,211 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. 
- * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -/* -Package spew implements a deep pretty printer for Go data structures to aid in -debugging. - -A quick overview of the additional features spew provides over the built-in -printing facilities for Go data types are as follows: - - * Pointers are dereferenced and followed - * Circular data structures are detected and handled properly - * Custom Stringer/error interfaces are optionally invoked, including - on unexported types - * Custom types which only implement the Stringer/error interfaces via - a pointer receiver are optionally invoked when passing non-pointer - variables - * Byte arrays and slices are dumped like the hexdump -C command which - includes offsets, byte values in hex, and ASCII output (only when using - Dump style) - -There are two different approaches spew allows for dumping Go data structures: - - * Dump style which prints with newlines, customizable indentation, - and additional debug information such as types and all pointer addresses - used to indirect to the final value - * A custom Formatter interface that integrates cleanly with the standard fmt - package and replaces %v, %+v, %#v, and %#+v to provide inline printing - similar to the default %v while providing the additional functionality - outlined above and passing unsupported format verbs such as %x and %q - along to fmt - -Quick Start - -This section demonstrates how to quickly get started with spew. See the -sections below for further details on formatting and configuration options. - -To dump a variable with full newlines, indentation, type, and pointer -information use Dump, Fdump, or Sdump: - spew.Dump(myVar1, myVar2, ...) - spew.Fdump(someWriter, myVar1, myVar2, ...) - str := spew.Sdump(myVar1, myVar2, ...) - -Alternatively, if you would prefer to use format strings with a compacted inline -printing style, use the convenience wrappers Printf, Fprintf, etc with -%v (most compact), %+v (adds pointer addresses), %#v (adds types), or -%#+v (adds types and pointer addresses): - spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2) - spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) - spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2) - spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) - -Configuration Options - -Configuration of spew is handled by fields in the ConfigState type. For -convenience, all of the top-level functions use a global state available -via the spew.Config global. - -It is also possible to create a ConfigState instance that provides methods -equivalent to the top-level functions. This allows concurrent configuration -options. See the ConfigState documentation for more details. - -The following configuration options are available: - * Indent - String to use for each indentation level for Dump functions. - It is a single space by default. A popular alternative is "\t". - - * MaxDepth - Maximum number of levels to descend into nested data structures. - There is no limit by default. 
- - * DisableMethods - Disables invocation of error and Stringer interface methods. - Method invocation is enabled by default. - - * DisablePointerMethods - Disables invocation of error and Stringer interface methods on types - which only accept pointer receivers from non-pointer variables. - Pointer method invocation is enabled by default. - - * DisablePointerAddresses - DisablePointerAddresses specifies whether to disable the printing of - pointer addresses. This is useful when diffing data structures in tests. - - * DisableCapacities - DisableCapacities specifies whether to disable the printing of - capacities for arrays, slices, maps and channels. This is useful when - diffing data structures in tests. - - * ContinueOnMethod - Enables recursion into types after invoking error and Stringer interface - methods. Recursion after method invocation is disabled by default. - - * SortKeys - Specifies map keys should be sorted before being printed. Use - this to have a more deterministic, diffable output. Note that - only native types (bool, int, uint, floats, uintptr and string) - and types which implement error or Stringer interfaces are - supported with other types sorted according to the - reflect.Value.String() output which guarantees display - stability. Natural map order is used by default. - - * SpewKeys - Specifies that, as a last resort attempt, map keys should be - spewed to strings and sorted by those strings. This is only - considered if SortKeys is true. - -Dump Usage - -Simply call spew.Dump with a list of variables you want to dump: - - spew.Dump(myVar1, myVar2, ...) - -You may also call spew.Fdump if you would prefer to output to an arbitrary -io.Writer. For example, to dump to standard error: - - spew.Fdump(os.Stderr, myVar1, myVar2, ...) - -A third option is to call spew.Sdump to get the formatted output as a string: - - str := spew.Sdump(myVar1, myVar2, ...) - -Sample Dump Output - -See the Dump example for details on the setup of the types and variables being -shown here. - - (main.Foo) { - unexportedField: (*main.Bar)(0xf84002e210)({ - flag: (main.Flag) flagTwo, - data: (uintptr) - }), - ExportedField: (map[interface {}]interface {}) (len=1) { - (string) (len=3) "one": (bool) true - } - } - -Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C -command as shown. - ([]uint8) (len=32 cap=32) { - 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... | - 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0| - 00000020 31 32 |12| - } - -Custom Formatter - -Spew provides a custom formatter that implements the fmt.Formatter interface -so that it integrates cleanly with standard fmt package printing functions. The -formatter is useful for inline printing of smaller data types similar to the -standard %v format specifier. - -The custom formatter only responds to the %v (most compact), %+v (adds pointer -addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb -combinations. Any other verbs such as %x and %q will be sent to the the -standard fmt package for formatting. In addition, the custom formatter ignores -the width and precision arguments (however they will still work on the format -specifiers not handled by the custom formatter). - -Custom Formatter Usage - -The simplest way to make use of the spew custom formatter is to call one of the -convenience functions such as spew.Printf, spew.Println, or spew.Printf. 
The -functions have syntax you are most likely already familiar with: - - spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2) - spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) - spew.Println(myVar, myVar2) - spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2) - spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) - -See the Index for the full list convenience functions. - -Sample Formatter Output - -Double pointer to a uint8: - %v: <**>5 - %+v: <**>(0xf8400420d0->0xf8400420c8)5 - %#v: (**uint8)5 - %#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5 - -Pointer to circular struct with a uint8 field and a pointer to itself: - %v: <*>{1 <*>} - %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)} - %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)} - %#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)} - -See the Printf example for details on the setup of variables being shown -here. - -Errors - -Since it is possible for custom Stringer/error interfaces to panic, spew -detects them and handles them internally by printing the panic information -inline with the output. Since spew is intended to provide deep pretty printing -capabilities on structures, it intentionally does not return any errors. -*/ -package spew diff --git a/vendor/github.com/davecgh/go-spew/spew/dump.go b/vendor/github.com/davecgh/go-spew/spew/dump.go deleted file mode 100644 index f78d89f..0000000 --- a/vendor/github.com/davecgh/go-spew/spew/dump.go +++ /dev/null @@ -1,509 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew - -import ( - "bytes" - "encoding/hex" - "fmt" - "io" - "os" - "reflect" - "regexp" - "strconv" - "strings" -) - -var ( - // uint8Type is a reflect.Type representing a uint8. It is used to - // convert cgo types to uint8 slices for hexdumping. - uint8Type = reflect.TypeOf(uint8(0)) - - // cCharRE is a regular expression that matches a cgo char. - // It is used to detect character arrays to hexdump them. - cCharRE = regexp.MustCompile(`^.*\._Ctype_char$`) - - // cUnsignedCharRE is a regular expression that matches a cgo unsigned - // char. It is used to detect unsigned character arrays to hexdump - // them. - cUnsignedCharRE = regexp.MustCompile(`^.*\._Ctype_unsignedchar$`) - - // cUint8tCharRE is a regular expression that matches a cgo uint8_t. - // It is used to detect uint8_t arrays to hexdump them. - cUint8tCharRE = regexp.MustCompile(`^.*\._Ctype_uint8_t$`) -) - -// dumpState contains information about the state of a dump operation. -type dumpState struct { - w io.Writer - depth int - pointers map[uintptr]int - ignoreNextType bool - ignoreNextIndent bool - cs *ConfigState -} - -// indent performs indentation according to the depth level and cs.Indent -// option. 
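The cgo regular expressions and `dumpState` fields above feed the hexdump behaviour described in the deleted package documentation: byte (and cgo char) slices are rendered in `hexdump -C` style with offsets, hex bytes, and an ASCII column. A quick sketch of what that looks like from the caller's side, using an arbitrary byte slice:

```go
package main

import "github.com/davecgh/go-spew/spew"

func main() {
	// A []byte is printed with offsets, hex values and an ASCII column,
	// similar to the output of `hexdump -C`.
	spew.Dump([]byte("hello, gocache"))
}
```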
-func (d *dumpState) indent() { - if d.ignoreNextIndent { - d.ignoreNextIndent = false - return - } - d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth)) -} - -// unpackValue returns values inside of non-nil interfaces when possible. -// This is useful for data types like structs, arrays, slices, and maps which -// can contain varying types packed inside an interface. -func (d *dumpState) unpackValue(v reflect.Value) reflect.Value { - if v.Kind() == reflect.Interface && !v.IsNil() { - v = v.Elem() - } - return v -} - -// dumpPtr handles formatting of pointers by indirecting them as necessary. -func (d *dumpState) dumpPtr(v reflect.Value) { - // Remove pointers at or below the current depth from map used to detect - // circular refs. - for k, depth := range d.pointers { - if depth >= d.depth { - delete(d.pointers, k) - } - } - - // Keep list of all dereferenced pointers to show later. - pointerChain := make([]uintptr, 0) - - // Figure out how many levels of indirection there are by dereferencing - // pointers and unpacking interfaces down the chain while detecting circular - // references. - nilFound := false - cycleFound := false - indirects := 0 - ve := v - for ve.Kind() == reflect.Ptr { - if ve.IsNil() { - nilFound = true - break - } - indirects++ - addr := ve.Pointer() - pointerChain = append(pointerChain, addr) - if pd, ok := d.pointers[addr]; ok && pd < d.depth { - cycleFound = true - indirects-- - break - } - d.pointers[addr] = d.depth - - ve = ve.Elem() - if ve.Kind() == reflect.Interface { - if ve.IsNil() { - nilFound = true - break - } - ve = ve.Elem() - } - } - - // Display type information. - d.w.Write(openParenBytes) - d.w.Write(bytes.Repeat(asteriskBytes, indirects)) - d.w.Write([]byte(ve.Type().String())) - d.w.Write(closeParenBytes) - - // Display pointer information. - if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 { - d.w.Write(openParenBytes) - for i, addr := range pointerChain { - if i > 0 { - d.w.Write(pointerChainBytes) - } - printHexPtr(d.w, addr) - } - d.w.Write(closeParenBytes) - } - - // Display dereferenced value. - d.w.Write(openParenBytes) - switch { - case nilFound: - d.w.Write(nilAngleBytes) - - case cycleFound: - d.w.Write(circularBytes) - - default: - d.ignoreNextType = true - d.dump(ve) - } - d.w.Write(closeParenBytes) -} - -// dumpSlice handles formatting of arrays and slices. Byte (uint8 under -// reflection) arrays and slices are dumped in hexdump -C fashion. -func (d *dumpState) dumpSlice(v reflect.Value) { - // Determine whether this type should be hex dumped or not. Also, - // for types which should be hexdumped, try to use the underlying data - // first, then fall back to trying to convert them to a uint8 slice. - var buf []uint8 - doConvert := false - doHexDump := false - numEntries := v.Len() - if numEntries > 0 { - vt := v.Index(0).Type() - vts := vt.String() - switch { - // C types that need to be converted. - case cCharRE.MatchString(vts): - fallthrough - case cUnsignedCharRE.MatchString(vts): - fallthrough - case cUint8tCharRE.MatchString(vts): - doConvert = true - - // Try to use existing uint8 slices and fall back to converting - // and copying if that fails. - case vt.Kind() == reflect.Uint8: - // We need an addressable interface to convert the type - // to a byte slice. However, the reflect package won't - // give us an interface on certain things like - // unexported struct fields in order to enforce - // visibility rules. 
We use unsafe, when available, to - // bypass these restrictions since this package does not - // mutate the values. - vs := v - if !vs.CanInterface() || !vs.CanAddr() { - vs = unsafeReflectValue(vs) - } - if !UnsafeDisabled { - vs = vs.Slice(0, numEntries) - - // Use the existing uint8 slice if it can be - // type asserted. - iface := vs.Interface() - if slice, ok := iface.([]uint8); ok { - buf = slice - doHexDump = true - break - } - } - - // The underlying data needs to be converted if it can't - // be type asserted to a uint8 slice. - doConvert = true - } - - // Copy and convert the underlying type if needed. - if doConvert && vt.ConvertibleTo(uint8Type) { - // Convert and copy each element into a uint8 byte - // slice. - buf = make([]uint8, numEntries) - for i := 0; i < numEntries; i++ { - vv := v.Index(i) - buf[i] = uint8(vv.Convert(uint8Type).Uint()) - } - doHexDump = true - } - } - - // Hexdump the entire slice as needed. - if doHexDump { - indent := strings.Repeat(d.cs.Indent, d.depth) - str := indent + hex.Dump(buf) - str = strings.Replace(str, "\n", "\n"+indent, -1) - str = strings.TrimRight(str, d.cs.Indent) - d.w.Write([]byte(str)) - return - } - - // Recursively call dump for each item. - for i := 0; i < numEntries; i++ { - d.dump(d.unpackValue(v.Index(i))) - if i < (numEntries - 1) { - d.w.Write(commaNewlineBytes) - } else { - d.w.Write(newlineBytes) - } - } -} - -// dump is the main workhorse for dumping a value. It uses the passed reflect -// value to figure out what kind of object we are dealing with and formats it -// appropriately. It is a recursive function, however circular data structures -// are detected and handled properly. -func (d *dumpState) dump(v reflect.Value) { - // Handle invalid reflect values immediately. - kind := v.Kind() - if kind == reflect.Invalid { - d.w.Write(invalidAngleBytes) - return - } - - // Handle pointers specially. - if kind == reflect.Ptr { - d.indent() - d.dumpPtr(v) - return - } - - // Print type information unless already handled elsewhere. - if !d.ignoreNextType { - d.indent() - d.w.Write(openParenBytes) - d.w.Write([]byte(v.Type().String())) - d.w.Write(closeParenBytes) - d.w.Write(spaceBytes) - } - d.ignoreNextType = false - - // Display length and capacity if the built-in len and cap functions - // work with the value's kind and the len/cap itself is non-zero. - valueLen, valueCap := 0, 0 - switch v.Kind() { - case reflect.Array, reflect.Slice, reflect.Chan: - valueLen, valueCap = v.Len(), v.Cap() - case reflect.Map, reflect.String: - valueLen = v.Len() - } - if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 { - d.w.Write(openParenBytes) - if valueLen != 0 { - d.w.Write(lenEqualsBytes) - printInt(d.w, int64(valueLen), 10) - } - if !d.cs.DisableCapacities && valueCap != 0 { - if valueLen != 0 { - d.w.Write(spaceBytes) - } - d.w.Write(capEqualsBytes) - printInt(d.w, int64(valueCap), 10) - } - d.w.Write(closeParenBytes) - d.w.Write(spaceBytes) - } - - // Call Stringer/error interfaces if they exist and the handle methods flag - // is enabled - if !d.cs.DisableMethods { - if (kind != reflect.Invalid) && (kind != reflect.Interface) { - if handled := handleMethods(d.cs, d.w, v); handled { - return - } - } - } - - switch kind { - case reflect.Invalid: - // Do nothing. We should never get here since invalid has already - // been handled above. 
- - case reflect.Bool: - printBool(d.w, v.Bool()) - - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: - printInt(d.w, v.Int(), 10) - - case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: - printUint(d.w, v.Uint(), 10) - - case reflect.Float32: - printFloat(d.w, v.Float(), 32) - - case reflect.Float64: - printFloat(d.w, v.Float(), 64) - - case reflect.Complex64: - printComplex(d.w, v.Complex(), 32) - - case reflect.Complex128: - printComplex(d.w, v.Complex(), 64) - - case reflect.Slice: - if v.IsNil() { - d.w.Write(nilAngleBytes) - break - } - fallthrough - - case reflect.Array: - d.w.Write(openBraceNewlineBytes) - d.depth++ - if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { - d.indent() - d.w.Write(maxNewlineBytes) - } else { - d.dumpSlice(v) - } - d.depth-- - d.indent() - d.w.Write(closeBraceBytes) - - case reflect.String: - d.w.Write([]byte(strconv.Quote(v.String()))) - - case reflect.Interface: - // The only time we should get here is for nil interfaces due to - // unpackValue calls. - if v.IsNil() { - d.w.Write(nilAngleBytes) - } - - case reflect.Ptr: - // Do nothing. We should never get here since pointers have already - // been handled above. - - case reflect.Map: - // nil maps should be indicated as different than empty maps - if v.IsNil() { - d.w.Write(nilAngleBytes) - break - } - - d.w.Write(openBraceNewlineBytes) - d.depth++ - if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { - d.indent() - d.w.Write(maxNewlineBytes) - } else { - numEntries := v.Len() - keys := v.MapKeys() - if d.cs.SortKeys { - sortValues(keys, d.cs) - } - for i, key := range keys { - d.dump(d.unpackValue(key)) - d.w.Write(colonSpaceBytes) - d.ignoreNextIndent = true - d.dump(d.unpackValue(v.MapIndex(key))) - if i < (numEntries - 1) { - d.w.Write(commaNewlineBytes) - } else { - d.w.Write(newlineBytes) - } - } - } - d.depth-- - d.indent() - d.w.Write(closeBraceBytes) - - case reflect.Struct: - d.w.Write(openBraceNewlineBytes) - d.depth++ - if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { - d.indent() - d.w.Write(maxNewlineBytes) - } else { - vt := v.Type() - numFields := v.NumField() - for i := 0; i < numFields; i++ { - d.indent() - vtf := vt.Field(i) - d.w.Write([]byte(vtf.Name)) - d.w.Write(colonSpaceBytes) - d.ignoreNextIndent = true - d.dump(d.unpackValue(v.Field(i))) - if i < (numFields - 1) { - d.w.Write(commaNewlineBytes) - } else { - d.w.Write(newlineBytes) - } - } - } - d.depth-- - d.indent() - d.w.Write(closeBraceBytes) - - case reflect.Uintptr: - printHexPtr(d.w, uintptr(v.Uint())) - - case reflect.UnsafePointer, reflect.Chan, reflect.Func: - printHexPtr(d.w, v.Pointer()) - - // There were not any other types at the time this code was written, but - // fall back to letting the default fmt package handle it in case any new - // types are added. - default: - if v.CanInterface() { - fmt.Fprintf(d.w, "%v", v.Interface()) - } else { - fmt.Fprintf(d.w, "%v", v.String()) - } - } -} - -// fdump is a helper function to consolidate the logic from the various public -// methods which take varying writers and config states. -func fdump(cs *ConfigState, w io.Writer, a ...interface{}) { - for _, arg := range a { - if arg == nil { - w.Write(interfaceBytes) - w.Write(spaceBytes) - w.Write(nilAngleBytes) - w.Write(newlineBytes) - continue - } - - d := dumpState{w: w, cs: cs} - d.pointers = make(map[uintptr]int) - d.dump(reflect.ValueOf(arg)) - d.w.Write(newlineBytes) - } -} - -// Fdump formats and displays the passed arguments to io.Writer w. 
It formats -// exactly the same as Dump. -func Fdump(w io.Writer, a ...interface{}) { - fdump(&Config, w, a...) -} - -// Sdump returns a string with the passed arguments formatted exactly the same -// as Dump. -func Sdump(a ...interface{}) string { - var buf bytes.Buffer - fdump(&Config, &buf, a...) - return buf.String() -} - -/* -Dump displays the passed parameters to standard out with newlines, customizable -indentation, and additional debug information such as complete types and all -pointer addresses used to indirect to the final value. It provides the -following features over the built-in printing facilities provided by the fmt -package: - - * Pointers are dereferenced and followed - * Circular data structures are detected and handled properly - * Custom Stringer/error interfaces are optionally invoked, including - on unexported types - * Custom types which only implement the Stringer/error interfaces via - a pointer receiver are optionally invoked when passing non-pointer - variables - * Byte arrays and slices are dumped like the hexdump -C command which - includes offsets, byte values in hex, and ASCII output - -The configuration options are controlled by an exported package global, -spew.Config. See ConfigState for options documentation. - -See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to -get the formatted result as a string. -*/ -func Dump(a ...interface{}) { - fdump(&Config, os.Stdout, a...) -} diff --git a/vendor/github.com/davecgh/go-spew/spew/format.go b/vendor/github.com/davecgh/go-spew/spew/format.go deleted file mode 100644 index b04edb7..0000000 --- a/vendor/github.com/davecgh/go-spew/spew/format.go +++ /dev/null @@ -1,419 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew - -import ( - "bytes" - "fmt" - "reflect" - "strconv" - "strings" -) - -// supportedFlags is a list of all the character flags supported by fmt package. -const supportedFlags = "0-+# " - -// formatState implements the fmt.Formatter interface and contains information -// about the state of a formatting operation. The NewFormatter function can -// be used to get a new Formatter which can be used directly as arguments -// in standard fmt package printing calls. -type formatState struct { - value interface{} - fs fmt.State - depth int - pointers map[uintptr]int - ignoreNextType bool - cs *ConfigState -} - -// buildDefaultFormat recreates the original format string without precision -// and width information to pass in to fmt.Sprintf in the case of an -// unrecognized type. Unless new types are added to the language, this -// function won't ever be called. 
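`formatState` and the two format-string rebuilders above are what let spew's Formatter claim only the `v` verbs and hand everything else back to the standard library. A small sketch of that split; the struct literal is arbitrary:

```go
package main

import (
	"fmt"

	"github.com/davecgh/go-spew/spew"
)

func main() {
	v := struct{ Name string }{Name: "spew"}
	f := spew.NewFormatter(v)

	// %#v is handled by the spew formatter itself...
	fmt.Printf("%#v\n", f)
	// ...while %q is not a v verb, so it is rebuilt by constructOrigFormat
	// and forwarded verbatim to the standard fmt package.
	fmt.Printf("%q\n", f)
}
```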
-func (f *formatState) buildDefaultFormat() (format string) { - buf := bytes.NewBuffer(percentBytes) - - for _, flag := range supportedFlags { - if f.fs.Flag(int(flag)) { - buf.WriteRune(flag) - } - } - - buf.WriteRune('v') - - format = buf.String() - return format -} - -// constructOrigFormat recreates the original format string including precision -// and width information to pass along to the standard fmt package. This allows -// automatic deferral of all format strings this package doesn't support. -func (f *formatState) constructOrigFormat(verb rune) (format string) { - buf := bytes.NewBuffer(percentBytes) - - for _, flag := range supportedFlags { - if f.fs.Flag(int(flag)) { - buf.WriteRune(flag) - } - } - - if width, ok := f.fs.Width(); ok { - buf.WriteString(strconv.Itoa(width)) - } - - if precision, ok := f.fs.Precision(); ok { - buf.Write(precisionBytes) - buf.WriteString(strconv.Itoa(precision)) - } - - buf.WriteRune(verb) - - format = buf.String() - return format -} - -// unpackValue returns values inside of non-nil interfaces when possible and -// ensures that types for values which have been unpacked from an interface -// are displayed when the show types flag is also set. -// This is useful for data types like structs, arrays, slices, and maps which -// can contain varying types packed inside an interface. -func (f *formatState) unpackValue(v reflect.Value) reflect.Value { - if v.Kind() == reflect.Interface { - f.ignoreNextType = false - if !v.IsNil() { - v = v.Elem() - } - } - return v -} - -// formatPtr handles formatting of pointers by indirecting them as necessary. -func (f *formatState) formatPtr(v reflect.Value) { - // Display nil if top level pointer is nil. - showTypes := f.fs.Flag('#') - if v.IsNil() && (!showTypes || f.ignoreNextType) { - f.fs.Write(nilAngleBytes) - return - } - - // Remove pointers at or below the current depth from map used to detect - // circular refs. - for k, depth := range f.pointers { - if depth >= f.depth { - delete(f.pointers, k) - } - } - - // Keep list of all dereferenced pointers to possibly show later. - pointerChain := make([]uintptr, 0) - - // Figure out how many levels of indirection there are by derferencing - // pointers and unpacking interfaces down the chain while detecting circular - // references. - nilFound := false - cycleFound := false - indirects := 0 - ve := v - for ve.Kind() == reflect.Ptr { - if ve.IsNil() { - nilFound = true - break - } - indirects++ - addr := ve.Pointer() - pointerChain = append(pointerChain, addr) - if pd, ok := f.pointers[addr]; ok && pd < f.depth { - cycleFound = true - indirects-- - break - } - f.pointers[addr] = f.depth - - ve = ve.Elem() - if ve.Kind() == reflect.Interface { - if ve.IsNil() { - nilFound = true - break - } - ve = ve.Elem() - } - } - - // Display type or indirection level depending on flags. - if showTypes && !f.ignoreNextType { - f.fs.Write(openParenBytes) - f.fs.Write(bytes.Repeat(asteriskBytes, indirects)) - f.fs.Write([]byte(ve.Type().String())) - f.fs.Write(closeParenBytes) - } else { - if nilFound || cycleFound { - indirects += strings.Count(ve.Type().String(), "*") - } - f.fs.Write(openAngleBytes) - f.fs.Write([]byte(strings.Repeat("*", indirects))) - f.fs.Write(closeAngleBytes) - } - - // Display pointer information depending on flags. 
- if f.fs.Flag('+') && (len(pointerChain) > 0) { - f.fs.Write(openParenBytes) - for i, addr := range pointerChain { - if i > 0 { - f.fs.Write(pointerChainBytes) - } - printHexPtr(f.fs, addr) - } - f.fs.Write(closeParenBytes) - } - - // Display dereferenced value. - switch { - case nilFound: - f.fs.Write(nilAngleBytes) - - case cycleFound: - f.fs.Write(circularShortBytes) - - default: - f.ignoreNextType = true - f.format(ve) - } -} - -// format is the main workhorse for providing the Formatter interface. It -// uses the passed reflect value to figure out what kind of object we are -// dealing with and formats it appropriately. It is a recursive function, -// however circular data structures are detected and handled properly. -func (f *formatState) format(v reflect.Value) { - // Handle invalid reflect values immediately. - kind := v.Kind() - if kind == reflect.Invalid { - f.fs.Write(invalidAngleBytes) - return - } - - // Handle pointers specially. - if kind == reflect.Ptr { - f.formatPtr(v) - return - } - - // Print type information unless already handled elsewhere. - if !f.ignoreNextType && f.fs.Flag('#') { - f.fs.Write(openParenBytes) - f.fs.Write([]byte(v.Type().String())) - f.fs.Write(closeParenBytes) - } - f.ignoreNextType = false - - // Call Stringer/error interfaces if they exist and the handle methods - // flag is enabled. - if !f.cs.DisableMethods { - if (kind != reflect.Invalid) && (kind != reflect.Interface) { - if handled := handleMethods(f.cs, f.fs, v); handled { - return - } - } - } - - switch kind { - case reflect.Invalid: - // Do nothing. We should never get here since invalid has already - // been handled above. - - case reflect.Bool: - printBool(f.fs, v.Bool()) - - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: - printInt(f.fs, v.Int(), 10) - - case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: - printUint(f.fs, v.Uint(), 10) - - case reflect.Float32: - printFloat(f.fs, v.Float(), 32) - - case reflect.Float64: - printFloat(f.fs, v.Float(), 64) - - case reflect.Complex64: - printComplex(f.fs, v.Complex(), 32) - - case reflect.Complex128: - printComplex(f.fs, v.Complex(), 64) - - case reflect.Slice: - if v.IsNil() { - f.fs.Write(nilAngleBytes) - break - } - fallthrough - - case reflect.Array: - f.fs.Write(openBracketBytes) - f.depth++ - if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { - f.fs.Write(maxShortBytes) - } else { - numEntries := v.Len() - for i := 0; i < numEntries; i++ { - if i > 0 { - f.fs.Write(spaceBytes) - } - f.ignoreNextType = true - f.format(f.unpackValue(v.Index(i))) - } - } - f.depth-- - f.fs.Write(closeBracketBytes) - - case reflect.String: - f.fs.Write([]byte(v.String())) - - case reflect.Interface: - // The only time we should get here is for nil interfaces due to - // unpackValue calls. - if v.IsNil() { - f.fs.Write(nilAngleBytes) - } - - case reflect.Ptr: - // Do nothing. We should never get here since pointers have already - // been handled above. 
- - case reflect.Map: - // nil maps should be indicated as different than empty maps - if v.IsNil() { - f.fs.Write(nilAngleBytes) - break - } - - f.fs.Write(openMapBytes) - f.depth++ - if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { - f.fs.Write(maxShortBytes) - } else { - keys := v.MapKeys() - if f.cs.SortKeys { - sortValues(keys, f.cs) - } - for i, key := range keys { - if i > 0 { - f.fs.Write(spaceBytes) - } - f.ignoreNextType = true - f.format(f.unpackValue(key)) - f.fs.Write(colonBytes) - f.ignoreNextType = true - f.format(f.unpackValue(v.MapIndex(key))) - } - } - f.depth-- - f.fs.Write(closeMapBytes) - - case reflect.Struct: - numFields := v.NumField() - f.fs.Write(openBraceBytes) - f.depth++ - if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { - f.fs.Write(maxShortBytes) - } else { - vt := v.Type() - for i := 0; i < numFields; i++ { - if i > 0 { - f.fs.Write(spaceBytes) - } - vtf := vt.Field(i) - if f.fs.Flag('+') || f.fs.Flag('#') { - f.fs.Write([]byte(vtf.Name)) - f.fs.Write(colonBytes) - } - f.format(f.unpackValue(v.Field(i))) - } - } - f.depth-- - f.fs.Write(closeBraceBytes) - - case reflect.Uintptr: - printHexPtr(f.fs, uintptr(v.Uint())) - - case reflect.UnsafePointer, reflect.Chan, reflect.Func: - printHexPtr(f.fs, v.Pointer()) - - // There were not any other types at the time this code was written, but - // fall back to letting the default fmt package handle it if any get added. - default: - format := f.buildDefaultFormat() - if v.CanInterface() { - fmt.Fprintf(f.fs, format, v.Interface()) - } else { - fmt.Fprintf(f.fs, format, v.String()) - } - } -} - -// Format satisfies the fmt.Formatter interface. See NewFormatter for usage -// details. -func (f *formatState) Format(fs fmt.State, verb rune) { - f.fs = fs - - // Use standard formatting for verbs that are not v. - if verb != 'v' { - format := f.constructOrigFormat(verb) - fmt.Fprintf(fs, format, f.value) - return - } - - if f.value == nil { - if fs.Flag('#') { - fs.Write(interfaceBytes) - } - fs.Write(nilAngleBytes) - return - } - - f.format(reflect.ValueOf(f.value)) -} - -// newFormatter is a helper function to consolidate the logic from the various -// public methods which take varying config states. -func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter { - fs := &formatState{value: v, cs: cs} - fs.pointers = make(map[uintptr]int) - return fs -} - -/* -NewFormatter returns a custom formatter that satisfies the fmt.Formatter -interface. As a result, it integrates cleanly with standard fmt package -printing functions. The formatter is useful for inline printing of smaller data -types similar to the standard %v format specifier. - -The custom formatter only responds to the %v (most compact), %+v (adds pointer -addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb -combinations. Any other verbs such as %x and %q will be sent to the the -standard fmt package for formatting. In addition, the custom formatter ignores -the width and precision arguments (however they will still work on the format -specifiers not handled by the custom formatter). - -Typically this function shouldn't be called directly. It is much easier to make -use of the custom formatter by calling one of the convenience functions such as -Printf, Println, or Fprintf. 
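One behaviour worth remembering as this vendored formatter goes away: cycles are detected through the `pointers` depth map threaded through `formatPtr`/`format` above, so self-referencing values terminate instead of recursing forever. A hedged sketch with an invented `node` type:

```go
package main

import "github.com/davecgh/go-spew/spew"

// node is a hypothetical self-referencing type.
type node struct {
	Value int
	Next  *node
}

func main() {
	n := &node{Value: 1}
	n.Next = n // deliberately create a cycle

	// The repeated pointer is marked instead of being followed forever;
	// the Dump style labels it roughly as "<already shown>".
	spew.Printf("%+v\n", n)
	spew.Dump(n)
}
```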
-*/ -func NewFormatter(v interface{}) fmt.Formatter { - return newFormatter(&Config, v) -} diff --git a/vendor/github.com/davecgh/go-spew/spew/spew.go b/vendor/github.com/davecgh/go-spew/spew/spew.go deleted file mode 100644 index 32c0e33..0000000 --- a/vendor/github.com/davecgh/go-spew/spew/spew.go +++ /dev/null @@ -1,148 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew - -import ( - "fmt" - "io" -) - -// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the formatted string as a value that satisfies error. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b)) -func Errorf(format string, a ...interface{}) (err error) { - return fmt.Errorf(format, convertArgs(a)...) -} - -// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b)) -func Fprint(w io.Writer, a ...interface{}) (n int, err error) { - return fmt.Fprint(w, convertArgs(a)...) -} - -// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b)) -func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { - return fmt.Fprintf(w, format, convertArgs(a)...) -} - -// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it -// passed with a default Formatter interface returned by NewFormatter. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b)) -func Fprintln(w io.Writer, a ...interface{}) (n int, err error) { - return fmt.Fprintln(w, convertArgs(a)...) -} - -// Print is a wrapper for fmt.Print that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. 
-// -// This function is shorthand for the following syntax: -// -// fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b)) -func Print(a ...interface{}) (n int, err error) { - return fmt.Print(convertArgs(a)...) -} - -// Printf is a wrapper for fmt.Printf that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b)) -func Printf(format string, a ...interface{}) (n int, err error) { - return fmt.Printf(format, convertArgs(a)...) -} - -// Println is a wrapper for fmt.Println that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b)) -func Println(a ...interface{}) (n int, err error) { - return fmt.Println(convertArgs(a)...) -} - -// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b)) -func Sprint(a ...interface{}) string { - return fmt.Sprint(convertArgs(a)...) -} - -// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b)) -func Sprintf(format string, a ...interface{}) string { - return fmt.Sprintf(format, convertArgs(a)...) -} - -// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it -// were passed with a default Formatter interface returned by NewFormatter. It -// returns the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b)) -func Sprintln(a ...interface{}) string { - return fmt.Sprintln(convertArgs(a)...) -} - -// convertArgs accepts a slice of arguments and returns a slice of the same -// length with each argument converted to a default spew Formatter interface. 
-func convertArgs(args []interface{}) (formatters []interface{}) { - formatters = make([]interface{}, len(args)) - for index, arg := range args { - formatters[index] = NewFormatter(arg) - } - return formatters -} diff --git a/vendor/github.com/dgraph-io/ristretto/.deepsource.toml b/vendor/github.com/dgraph-io/ristretto/.deepsource.toml deleted file mode 100644 index 40609ef..0000000 --- a/vendor/github.com/dgraph-io/ristretto/.deepsource.toml +++ /dev/null @@ -1,17 +0,0 @@ -version = 1 - -test_patterns = [ - '**/*_test.go' -] - -exclude_patterns = [ - -] - -[[analyzers]] -name = 'go' -enabled = true - - - [analyzers.meta] - import_path = 'github.com/dgraph-io/ristretto' diff --git a/vendor/github.com/dgraph-io/ristretto/.go-version b/vendor/github.com/dgraph-io/ristretto/.go-version deleted file mode 100644 index b8f1e3f..0000000 --- a/vendor/github.com/dgraph-io/ristretto/.go-version +++ /dev/null @@ -1 +0,0 @@ -1.17.11 diff --git a/vendor/github.com/dgraph-io/ristretto/.golangci.yml b/vendor/github.com/dgraph-io/ristretto/.golangci.yml deleted file mode 100644 index 7318e9a..0000000 --- a/vendor/github.com/dgraph-io/ristretto/.golangci.yml +++ /dev/null @@ -1,23 +0,0 @@ -run: - tests: false - skip-dirs: - - contrib - - sim - -linters-settings: - lll: - line-length: 120 - -linters: - disable-all: true - enable: - #- errcheck - #- ineffassign - - gas - #- gofmt - #- golint - #- gosimple - #- govet - - lll - #- varcheck - #- unused diff --git a/vendor/github.com/dgraph-io/ristretto/CHANGELOG.md b/vendor/github.com/dgraph-io/ristretto/CHANGELOG.md deleted file mode 100644 index 3d18e39..0000000 --- a/vendor/github.com/dgraph-io/ristretto/CHANGELOG.md +++ /dev/null @@ -1,187 +0,0 @@ -# Changelog -All notable changes to this project will be documented in this file. - -The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/) -and this project will adhere to [Semantic Versioning](http://semver.org/spec/v2.0.0.html) starting v1.0.0. - -## [0.1.1] - 2022-10-12 - -[0.1.1]: https://github.com/dgraph-io/ristretto/compare/v0.1.0..v0.1.1 -This release fixes certain arm64 build issues in the z package. It also -incorporates CI steps in our repository. - -### Changed -- [chore(docs): Include SpiceDB in the list of projects using Ristretto (#285)](https://github.com/dgraph-io/ristretto/pull/311) - -### Added -- [Run CI Jobs via Github Actions #304](https://github.com/dgraph-io/ristretto/pull/304) - -### Fixed -- [fix(build): update x/sys dependency](https://github.com/dgraph-io/ristretto/pull/308) -- [fix(z): Address inconsistent mremap return arguments with arm64](https://github.com/dgraph-io/ristretto/pull/309) -- [fix(z): runtime error: index out of range for !amd64 env #287](https://github.com/dgraph-io/ristretto/pull/307) - -## [0.1.0] - 2021-06-03 - -[0.1.0]: https://github.com/dgraph-io/ristretto/compare/v0.0.3..v0.1.0 -This release contains bug fixes and improvements to Ristretto. It also contains -major updates to the z package. The z package contains types such as Tree (B+ -tree), Buffer, Mmap file, etc. All these types are used in Badger and Dgraph to -improve performance and reduce memory requirements. - -### Changed -- Make item public. Add a new onReject call for rejected items. (#180) - -### Added -- Use z.Buffer backing for B+ tree (#268) -- expose GetTTL function (#270) -- docs(README): Ristretto is production-ready. 
(#267) -- Add IterateKV (#265) -- feat(super-flags): Add GetPath method in superflags (#258) -- add GetDuration to SuperFlag (#248) -- add Has, GetFloat64, and GetInt64 to SuperFlag (#247) -- move SuperFlag to Ristretto (#246) -- add SuperFlagHelp tool to generate flag help text (#251) -- allow empty defaults in SuperFlag (#254) -- add mmaped b+ tree (#207) -- Add API to allow the MaxCost of an existing cache to be updated. (#200) -- Add OnExit handler which can be used for manual memory management (#183) -- Add life expectancy histogram (#182) -- Add mechanism to wait for items to be processed. (#184) - -### Fixed -- change expiration type from int64 to time.Time (#277) -- fix(buffer): make buffer capacity atleast defaultCapacity (#273) -- Fixes for z.PersistentTree (#272) -- Initialize persistent tree correctly (#271) -- use xxhash v2 (#266) -- update comments to correctly reflect counter space usage (#189) -- enable riscv64 builds (#264) -- Switch from log to glog (#263) -- Use Fibonacci for latency numbers -- cache: fix race when clearning a cache (#261) -- Check for keys without values in superflags (#259) -- chore(perf): using tags instead of runtime callers to improve the performance of leak detection (#255) -- fix(Flags): panic on user errors (#256) -- fix SuperFlagHelp newline (#252) -- fix(arm): Fix crashing under ARMv6 due to memory mis-alignment (#239) -- Fix incorrect unit test coverage depiction (#245) -- chore(histogram): adding percentile in histogram (#241) -- fix(windows): use filepath instead of path (#244) -- fix(MmapFile): Close the fd before deleting the file (#242) -- Fixes CGO_ENABLED=0 compilation error (#240) -- fix(build): fix build on non-amd64 architectures (#238) -- fix(b+tree): Do not double the size of btree (#237) -- fix(jemalloc): Fix the stats of jemalloc (#236) -- Don't print stuff, only return strings. -- Bring memclrNoHeapPointers to z (#235) -- increase number of buffers from 32 to 64 in allocator (#234) -- Set minSize to 1MB. -- Opt(btree): Use Go memory instead of mmap files -- Opt(btree): Lightweight stats calculation -- Put padding internally to z.Buffer -- Chore(z): Add SetTmpDir API to set the temp directory (#233) -- Add a BufferFrom -- Bring z.Allocator and z.AllocatorPool back -- Fix(z.Allocator): Make Allocator use Go memory -- Updated ZeroOut to use a simple for loop. (#231) -- Add concurrency back -- Add a test to check concurrency of Allocator. -- Fix(buffer): Expose padding by z.Buffer's APIs and fix test (#222) -- AllocateSlice should Truncate if the file is not big enough (#226) -- Zero out allocations for structs now that we're reusing Allocators. -- Fix the ristretto substring -- Deal with nil z.AllocatorPool -- Create an AllocatorPool class. -- chore(btree): clean NewTree API (#225) -- fix(MmapFile): Don't error out if fileSize > sz (#224) -- feat(btree): allow option to reset btree and mmaping it to specified file. (#223) -- Use mremap on Linux instead of munmap+mmap (#221) -- Reuse pages in B+ tree (#220) -- fix(allocator): make nil allocator return go byte slice (#217) -- fix(buffer): Make padding internal to z.buffer (#216) -- chore(buffer): add a parent directory field in z.Buffer (#215) -- Make Allocator concurrent -- Fix infinite loop in allocator (#214) -- Add trim func -- Use allocator pool. Turn off freelist. -- Add freelists to Allocator to reuse. -- make DeleteBelow delete values that are less than lo (#211) -- Avoid an unnecessary Load procedure in IncrementOffset. -- Add Stats method in Btree. 
-- chore(script): fix local test script (#210) -- fix(btree): Increase buffer size if needed. (#209) -- chore(btree): add occupancy ratio, search benchmark and compact bug fix (#208) -- Add licenses, remove prints, and fix a bug in compact -- Add IncrementOffset API for z.buffers (#206) -- Show count when printing histogram (#201) -- Zbuffer: Add LenNoPadding and make padding 8 bytes (#204) -- Allocate Go memory in case allocator is nil. -- Add leak detection via leak build flag and fix a leak during cache.Close. -- Add some APIs for allocator and buffer -- Sync before truncation or close. -- Handle nil MmapFile for Sync. -- Public methods must not panic after Close() (#202) -- Check for RD_ONLY correctly. -- Modify MmapFile APIs -- Add a bunch of APIs around MmapFile -- Move APIs for mmapfile creation over to z package. -- Add ZeroOut func -- Add SliceOffsets -- z: Add TotalSize method on bloom filter (#197) -- Add Msync func -- Buffer: Use 256 GB mmap size instead of MaxInt64 (#198) -- Add a simple test to check next2Pow -- Improve memory performance (#195) -- Have a way to automatically mmap a growing buffer (#196) -- Introduce Mmapped buffers and Merge Sort (#194) -- Add a way to access an allocator via reference. -- Use jemalloc.a to ensure compilation with the Go binary -- Fix up a build issue with ReadMemStats -- Add ReadMemStats function (#193) -- Allocator helps allocate memory to be used by unsafe structs (#192) -- Improve histogram output -- Move Closer from y to z (#191) -- Add histogram.Mean() method (#188) -- Introduce Calloc: Manual Memory Management via jemalloc (#186) - -## [0.0.3] - 2020-07-06 - -[0.0.3]: https://github.com/dgraph-io/ristretto/compare/v0.0.2..v0.0.3 - -### Changed - -### Added - -### Fixed - -- z: use MemHashString and xxhash.Sum64String ([#153][]) -- Check conflict key before updating expiration map. ([#154][]) -- Fix race condition in Cache.Clear ([#133][]) -- Improve handling of updated items ([#168][]) -- Fix droppedSets count while updating the item ([#171][]) - -## [0.0.2] - 2020-02-24 - -[0.0.2]: https://github.com/dgraph-io/ristretto/compare/v0.0.1..v0.0.2 - -### Added - -- Sets with TTL. ([#122][]) - -### Fixed - -- Fix the way metrics are handled for deletions. ([#111][]) -- Support nil `*Cache` values in `Clear` and `Close`. ([#119][]) -- Delete item immediately. ([#113][]) -- Remove key from policy after TTL eviction. ([#130][]) - -[#111]: https://github.com/dgraph-io/ristretto/issues/111 -[#113]: https://github.com/dgraph-io/ristretto/issues/113 -[#119]: https://github.com/dgraph-io/ristretto/issues/119 -[#122]: https://github.com/dgraph-io/ristretto/issues/122 -[#130]: https://github.com/dgraph-io/ristretto/issues/130 - -## 0.0.1 - -First release. Basic cache functionality based on a LFU policy. diff --git a/vendor/github.com/dgraph-io/ristretto/LICENSE b/vendor/github.com/dgraph-io/ristretto/LICENSE deleted file mode 100644 index d9a10c0..0000000 --- a/vendor/github.com/dgraph-io/ristretto/LICENSE +++ /dev/null @@ -1,176 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. 
- - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS diff --git a/vendor/github.com/dgraph-io/ristretto/README.md b/vendor/github.com/dgraph-io/ristretto/README.md deleted file mode 100644 index e71ae3d..0000000 --- a/vendor/github.com/dgraph-io/ristretto/README.md +++ /dev/null @@ -1,220 +0,0 @@ -# Ristretto -[![Go Doc](https://img.shields.io/badge/godoc-reference-blue.svg)](http://godoc.org/github.com/dgraph-io/ristretto) -[![ci-ristretto-tests](https://github.com/dgraph-io/ristretto/actions/workflows/ci-ristretto-tests.yml/badge.svg)](https://github.com/dgraph-io/ristretto/actions/workflows/ci-ristretto-tests.yml) -[![ci-ristretto-lint](https://github.com/dgraph-io/ristretto/actions/workflows/ci-ristretto-lint.yml/badge.svg)](https://github.com/dgraph-io/ristretto/actions/workflows/ci-ristretto-lint.yml) -[![Coverage Status](https://coveralls.io/repos/github/dgraph-io/ristretto/badge.svg?branch=main)](https://coveralls.io/github/dgraph-io/ristretto?branch=main) -[![Go Report Card](https://img.shields.io/badge/go%20report-A%2B-brightgreen)](https://goreportcard.com/report/github.com/dgraph-io/ristretto) - -Ristretto is a fast, concurrent cache library built with a focus on performance and correctness. 
- -The motivation to build Ristretto comes from the need for a contention-free -cache in [Dgraph][]. - -[Dgraph]: https://github.com/dgraph-io/dgraph - -## Features - -* **High Hit Ratios** - with our unique admission/eviction policy pairing, Ristretto's performance is best in class. - * **Eviction: SampledLFU** - on par with exact LRU and better performance on Search and Database traces. - * **Admission: TinyLFU** - extra performance with little memory overhead (12 bits per counter). -* **Fast Throughput** - we use a variety of techniques for managing contention and the result is excellent throughput. -* **Cost-Based Eviction** - any large new item deemed valuable can evict multiple smaller items (cost could be anything). -* **Fully Concurrent** - you can use as many goroutines as you want with little throughput degradation. -* **Metrics** - optional performance metrics for throughput, hit ratios, and other stats. -* **Simple API** - just figure out your ideal `Config` values and you're off and running. - -## Status - -Ristretto is production-ready. See [Projects using Ristretto](#projects-using-ristretto). - -## Table of Contents - -* [Usage](#Usage) - * [Example](#Example) - * [Config](#Config) - * [NumCounters](#Config) - * [MaxCost](#Config) - * [BufferItems](#Config) - * [Metrics](#Config) - * [OnEvict](#Config) - * [KeyToHash](#Config) - * [Cost](#Config) -* [Benchmarks](#Benchmarks) - * [Hit Ratios](#Hit-Ratios) - * [Search](#Search) - * [Database](#Database) - * [Looping](#Looping) - * [CODASYL](#CODASYL) - * [Throughput](#Throughput) - * [Mixed](#Mixed) - * [Read](#Read) - * [Write](#Write) -* [Projects using Ristretto](#projects-using-ristretto) -* [FAQ](#FAQ) - -## Usage - -### Example - -```go -func main() { - cache, err := ristretto.NewCache(&ristretto.Config{ - NumCounters: 1e7, // number of keys to track frequency of (10M). - MaxCost: 1 << 30, // maximum cost of cache (1GB). - BufferItems: 64, // number of keys per Get buffer. - }) - if err != nil { - panic(err) - } - - // set a value with a cost of 1 - cache.Set("key", "value", 1) - - // wait for value to pass through buffers - time.Sleep(10 * time.Millisecond) - - value, found := cache.Get("key") - if !found { - panic("missing value") - } - fmt.Println(value) - cache.Del("key") -} -``` - -### Config - -The `Config` struct is passed to `NewCache` when creating Ristretto instances (see the example above). - -**NumCounters** `int64` - -NumCounters is the number of 4-bit access counters to keep for admission and eviction. We've seen good performance in setting this to 10x the number of items you expect to keep in the cache when full. - -For example, if you expect each item to have a cost of 1 and MaxCost is 100, set NumCounters to 1,000. Or, if you use variable cost values but expect the cache to hold around 10,000 items when full, set NumCounters to 100,000. The important thing is the *number of unique items* in the full cache, not necessarily the MaxCost value. - -**MaxCost** `int64` - -MaxCost is how eviction decisions are made. For example, if MaxCost is 100 and a new item with a cost of 1 increases total cache cost to 101, 1 item will be evicted. - -MaxCost can also be used to denote the max size in bytes. For example, if MaxCost is 1,000,000 (1MB) and the cache is full with 1,000 1KB items, a new item (that's accepted) would cause 5 1KB items to be evicted. - -MaxCost could be anything as long as it matches how you're using the cost values when calling Set. 
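For instance (an illustrative sketch, not part of the vendored README), a cache with a 100 MB budget for values could pass each value's byte length as the cost:

```go
package main

import (
	"fmt"

	"github.com/dgraph-io/ristretto"
)

func main() {
	cache, err := ristretto.NewCache(&ristretto.Config{
		NumCounters: 1e6,       // ~10x the expected number of items when full
		MaxCost:     100 << 20, // 100 MB budget, because the costs below are bytes
		BufferItems: 64,
	})
	if err != nil {
		panic(err)
	}
	defer cache.Close()

	value := []byte("some payload")
	// The cost is the value's size in bytes, so MaxCost acts as a byte budget.
	cache.Set("key", value, int64(len(value)))
	cache.Wait() // wait for the Set to pass through the buffers

	if v, found := cache.Get("key"); found {
		fmt.Printf("%s\n", v)
	}
}
```

If costs were item counts instead (cost 1 per Set), the same MaxCost field would simply cap the number of entries.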
- -**BufferItems** `int64` - -BufferItems is the size of the Get buffers. The best value we've found for this is 64. - -If for some reason you see Get performance decreasing with lots of contention (you shouldn't), try increasing this value in increments of 64. This is a fine-tuning mechanism and you probably won't have to touch this. - -**Metrics** `bool` - -Metrics is true when you want real-time logging of a variety of stats. The reason this is a Config flag is because there's a 10% throughput performance overhead. - -**OnEvict** `func(hashes [2]uint64, value interface{}, cost int64)` - -OnEvict is called for every eviction. - -**KeyToHash** `func(key interface{}) [2]uint64` - -KeyToHash is the hashing algorithm used for every key. If this is nil, Ristretto has a variety of [defaults depending on the underlying interface type](https://github.com/dgraph-io/ristretto/blob/master/z/z.go#L19-L41). - -Note that if you want 128bit hashes you should use the full `[2]uint64`, -otherwise just fill the `uint64` at the `0` position and it will behave like -any 64bit hash. - -**Cost** `func(value interface{}) int64` - -Cost is an optional function you can pass to the Config in order to evaluate -item cost at runtime, and only for the Set calls that aren't dropped (this is -useful if calculating item cost is particularly expensive and you don't want to -waste time on items that will be dropped anyways). - -To signal to Ristretto that you'd like to use this Cost function: - -1. Set the Cost field to a non-nil function. -2. When calling Set for new items or item updates, use a `cost` of 0. - -## Benchmarks - -The benchmarks can be found in https://github.com/dgraph-io/benchmarks/tree/master/cachebench/ristretto. - -### Hit Ratios - -#### Search - -This trace is described as "disk read accesses initiated by a large commercial -search engine in response to various web search requests." - -
[hit-ratio graphs for the Search trace omitted]
- -#### Database - -This trace is described as "a database server running at a commercial site -running an ERP application on top of a commercial database." - -
[hit-ratio graphs for the Database trace omitted]
- -#### Looping - -This trace demonstrates a looping access pattern. - -
[hit-ratio graphs for the Looping trace omitted]
- -#### CODASYL - -This trace is described as "references to a CODASYL database for a one hour -period." - -
[hit-ratio graphs for the CODASYL trace omitted]
- -### Throughput - -All throughput benchmarks were run on an Intel Core i7-8700K (3.7GHz) with 16GB -of RAM. - -#### Mixed - -
[throughput graphs for the Mixed workload omitted]
- -#### Read - -
[throughput graphs for the Read workload omitted]
- -#### Write - -
[throughput graphs for the Write workload omitted]
- -## Projects Using Ristretto - -Below is a list of known projects that use Ristretto: - -- [Badger](https://github.com/dgraph-io/badger) - Embeddable key-value DB in Go -- [Dgraph](https://github.com/dgraph-io/dgraph) - Horizontally scalable and distributed GraphQL database with a graph backend -- [Vitess](https://github.com/vitessio/vitess) - Database clustering system for horizontal scaling of MySQL -- [SpiceDB](https://github.com/authzed/spicedb) - Horizontally scalable permissions database - -## FAQ - -### How are you achieving this performance? What shortcuts are you taking? - -We go into detail in the [Ristretto blog post](https://blog.dgraph.io/post/introducing-ristretto-high-perf-go-cache/), but in short: our throughput performance can be attributed to a mix of batching and eventual consistency. Our hit ratio performance is mostly due to an excellent [admission policy](https://arxiv.org/abs/1512.00727) and SampledLFU eviction policy. - -As for "shortcuts," the only thing Ristretto does that could be construed as one is dropping some Set calls. That means a Set call for a new item (updates are guaranteed) isn't guaranteed to make it into the cache. The new item could be dropped at two points: when passing through the Set buffer or when passing through the admission policy. However, this doesn't affect hit ratios much at all as we expect the most popular items to be Set multiple times and eventually make it in the cache. - -### Is Ristretto distributed? - -No, it's just like any other Go library that you can import into your project and use in a single process. diff --git a/vendor/github.com/dgraph-io/ristretto/cache.go b/vendor/github.com/dgraph-io/ristretto/cache.go deleted file mode 100644 index 7226245..0000000 --- a/vendor/github.com/dgraph-io/ristretto/cache.go +++ /dev/null @@ -1,719 +0,0 @@ -/* - * Copyright 2019 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -// Ristretto is a fast, fixed size, in-memory cache with a dual focus on -// throughput and hit ratio performance. You can easily add Ristretto to an -// existing system and keep the most valuable data where you need it. -package ristretto - -import ( - "bytes" - "errors" - "fmt" - "sync" - "sync/atomic" - "time" - "unsafe" - - "github.com/dgraph-io/ristretto/z" -) - -var ( - // TODO: find the optimal value for this or make it configurable - setBufSize = 32 * 1024 -) - -type itemCallback func(*Item) - -const itemSize = int64(unsafe.Sizeof(storeItem{})) - -// Cache is a thread-safe implementation of a hashmap with a TinyLFU admission -// policy and a Sampled LFU eviction policy. You can use the same Cache instance -// from as many goroutines as you want. -type Cache struct { - // store is the central concurrent hashmap where key-value items are stored. - store store - // policy determines what gets let in to the cache and what gets kicked out. - policy policy - // getBuf is a custom ring buffer implementation that gets pushed to when - // keys are read. 
- getBuf *ringBuffer - // setBuf is a buffer allowing us to batch/drop Sets during times of high - // contention. - setBuf chan *Item - // onEvict is called for item evictions. - onEvict itemCallback - // onReject is called when an item is rejected via admission policy. - onReject itemCallback - // onExit is called whenever a value goes out of scope from the cache. - onExit (func(interface{})) - // KeyToHash function is used to customize the key hashing algorithm. - // Each key will be hashed using the provided function. If keyToHash value - // is not set, the default keyToHash function is used. - keyToHash func(interface{}) (uint64, uint64) - // stop is used to stop the processItems goroutine. - stop chan struct{} - // indicates whether cache is closed. - isClosed bool - // cost calculates cost from a value. - cost func(value interface{}) int64 - // ignoreInternalCost dictates whether to ignore the cost of internally storing - // the item in the cost calculation. - ignoreInternalCost bool - // cleanupTicker is used to periodically check for entries whose TTL has passed. - cleanupTicker *time.Ticker - // Metrics contains a running log of important statistics like hits, misses, - // and dropped items. - Metrics *Metrics -} - -// Config is passed to NewCache for creating new Cache instances. -type Config struct { - // NumCounters determines the number of counters (keys) to keep that hold - // access frequency information. It's generally a good idea to have more - // counters than the max cache capacity, as this will improve eviction - // accuracy and subsequent hit ratios. - // - // For example, if you expect your cache to hold 1,000,000 items when full, - // NumCounters should be 10,000,000 (10x). Each counter takes up roughly - // 3 bytes (4 bits for each counter * 4 copies plus about a byte per - // counter for the bloom filter). Note that the number of counters is - // internally rounded up to the nearest power of 2, so the space usage - // may be a little larger than 3 bytes * NumCounters. - NumCounters int64 - // MaxCost can be considered as the cache capacity, in whatever units you - // choose to use. - // - // For example, if you want the cache to have a max capacity of 100MB, you - // would set MaxCost to 100,000,000 and pass an item's number of bytes as - // the `cost` parameter for calls to Set. If new items are accepted, the - // eviction process will take care of making room for the new item and not - // overflowing the MaxCost value. - MaxCost int64 - // BufferItems determines the size of Get buffers. - // - // Unless you have a rare use case, using `64` as the BufferItems value - // results in good performance. - BufferItems int64 - // Metrics determines whether cache statistics are kept during the cache's - // lifetime. There *is* some overhead to keeping statistics, so you should - // only set this flag to true when testing or throughput performance isn't a - // major factor. - Metrics bool - // OnEvict is called for every eviction and passes the hashed key, value, - // and cost to the function. - OnEvict func(item *Item) - // OnReject is called for every rejection done via the policy. - OnReject func(item *Item) - // OnExit is called whenever a value is removed from cache. This can be - // used to do manual memory deallocation. Would also be called on eviction - // and rejection of the value. - OnExit func(val interface{}) - // KeyToHash function is used to customize the key hashing algorithm. - // Each key will be hashed using the provided function. 
If keyToHash value - // is not set, the default keyToHash function is used. - KeyToHash func(key interface{}) (uint64, uint64) - // Cost evaluates a value and outputs a corresponding cost. This function - // is ran after Set is called for a new item or an item update with a cost - // param of 0. - Cost func(value interface{}) int64 - // IgnoreInternalCost set to true indicates to the cache that the cost of - // internally storing the value should be ignored. This is useful when the - // cost passed to set is not using bytes as units. Keep in mind that setting - // this to true will increase the memory usage. - IgnoreInternalCost bool -} - -type itemFlag byte - -const ( - itemNew itemFlag = iota - itemDelete - itemUpdate -) - -// Item is passed to setBuf so items can eventually be added to the cache. -type Item struct { - flag itemFlag - Key uint64 - Conflict uint64 - Value interface{} - Cost int64 - Expiration time.Time - wg *sync.WaitGroup -} - -// NewCache returns a new Cache instance and any configuration errors, if any. -func NewCache(config *Config) (*Cache, error) { - switch { - case config.NumCounters == 0: - return nil, errors.New("NumCounters can't be zero") - case config.MaxCost == 0: - return nil, errors.New("MaxCost can't be zero") - case config.BufferItems == 0: - return nil, errors.New("BufferItems can't be zero") - } - policy := newPolicy(config.NumCounters, config.MaxCost) - cache := &Cache{ - store: newStore(), - policy: policy, - getBuf: newRingBuffer(policy, config.BufferItems), - setBuf: make(chan *Item, setBufSize), - keyToHash: config.KeyToHash, - stop: make(chan struct{}), - cost: config.Cost, - ignoreInternalCost: config.IgnoreInternalCost, - cleanupTicker: time.NewTicker(time.Duration(bucketDurationSecs) * time.Second / 2), - } - cache.onExit = func(val interface{}) { - if config.OnExit != nil && val != nil { - config.OnExit(val) - } - } - cache.onEvict = func(item *Item) { - if config.OnEvict != nil { - config.OnEvict(item) - } - cache.onExit(item.Value) - } - cache.onReject = func(item *Item) { - if config.OnReject != nil { - config.OnReject(item) - } - cache.onExit(item.Value) - } - if cache.keyToHash == nil { - cache.keyToHash = z.KeyToHash - } - if config.Metrics { - cache.collectMetrics() - } - // NOTE: benchmarks seem to show that performance decreases the more - // goroutines we have running cache.processItems(), so 1 should - // usually be sufficient - go cache.processItems() - return cache, nil -} - -func (c *Cache) Wait() { - if c == nil || c.isClosed { - return - } - wg := &sync.WaitGroup{} - wg.Add(1) - c.setBuf <- &Item{wg: wg} - wg.Wait() -} - -// Get returns the value (if any) and a boolean representing whether the -// value was found or not. The value can be nil and the boolean can be true at -// the same time. -func (c *Cache) Get(key interface{}) (interface{}, bool) { - if c == nil || c.isClosed || key == nil { - return nil, false - } - keyHash, conflictHash := c.keyToHash(key) - c.getBuf.Push(keyHash) - value, ok := c.store.Get(keyHash, conflictHash) - if ok { - c.Metrics.add(hit, keyHash, 1) - } else { - c.Metrics.add(miss, keyHash, 1) - } - return value, ok -} - -// Set attempts to add the key-value item to the cache. If it returns false, -// then the Set was dropped and the key-value item isn't added to the cache. 
If -// it returns true, there's still a chance it could be dropped by the policy if -// its determined that the key-value item isn't worth keeping, but otherwise the -// item will be added and other items will be evicted in order to make room. -// -// To dynamically evaluate the items cost using the Config.Coster function, set -// the cost parameter to 0 and Coster will be ran when needed in order to find -// the items true cost. -func (c *Cache) Set(key, value interface{}, cost int64) bool { - return c.SetWithTTL(key, value, cost, 0*time.Second) -} - -// SetWithTTL works like Set but adds a key-value pair to the cache that will expire -// after the specified TTL (time to live) has passed. A zero value means the value never -// expires, which is identical to calling Set. A negative value is a no-op and the value -// is discarded. -func (c *Cache) SetWithTTL(key, value interface{}, cost int64, ttl time.Duration) bool { - if c == nil || c.isClosed || key == nil { - return false - } - - var expiration time.Time - switch { - case ttl == 0: - // No expiration. - break - case ttl < 0: - // Treat this a a no-op. - return false - default: - expiration = time.Now().Add(ttl) - } - - keyHash, conflictHash := c.keyToHash(key) - i := &Item{ - flag: itemNew, - Key: keyHash, - Conflict: conflictHash, - Value: value, - Cost: cost, - Expiration: expiration, - } - // cost is eventually updated. The expiration must also be immediately updated - // to prevent items from being prematurely removed from the map. - if prev, ok := c.store.Update(i); ok { - c.onExit(prev) - i.flag = itemUpdate - } - // Attempt to send item to policy. - select { - case c.setBuf <- i: - return true - default: - if i.flag == itemUpdate { - // Return true if this was an update operation since we've already - // updated the store. For all the other operations (set/delete), we - // return false which means the item was not inserted. - return true - } - c.Metrics.add(dropSets, keyHash, 1) - return false - } -} - -// Del deletes the key-value item from the cache if it exists. -func (c *Cache) Del(key interface{}) { - if c == nil || c.isClosed || key == nil { - return - } - keyHash, conflictHash := c.keyToHash(key) - // Delete immediately. - _, prev := c.store.Del(keyHash, conflictHash) - c.onExit(prev) - // If we've set an item, it would be applied slightly later. - // So we must push the same item to `setBuf` with the deletion flag. - // This ensures that if a set is followed by a delete, it will be - // applied in the correct order. - c.setBuf <- &Item{ - flag: itemDelete, - Key: keyHash, - Conflict: conflictHash, - } -} - -// GetTTL returns the TTL for the specified key and a bool that is true if the -// item was found and is not expired. -func (c *Cache) GetTTL(key interface{}) (time.Duration, bool) { - if c == nil || key == nil { - return 0, false - } - - keyHash, conflictHash := c.keyToHash(key) - if _, ok := c.store.Get(keyHash, conflictHash); !ok { - // not found - return 0, false - } - - expiration := c.store.Expiration(keyHash) - if expiration.IsZero() { - // found but no expiration - return 0, true - } - - if time.Now().After(expiration) { - // found but expired - return 0, false - } - - return time.Until(expiration), true -} - -// Close stops all goroutines and closes all channels. -func (c *Cache) Close() { - if c == nil || c.isClosed { - return - } - c.Clear() - - // Block until processItems goroutine is returned. 
- c.stop <- struct{}{} - close(c.stop) - close(c.setBuf) - c.policy.Close() - c.isClosed = true -} - -// Clear empties the hashmap and zeroes all policy counters. Note that this is -// not an atomic operation (but that shouldn't be a problem as it's assumed that -// Set/Get calls won't be occurring until after this). -func (c *Cache) Clear() { - if c == nil || c.isClosed { - return - } - // Block until processItems goroutine is returned. - c.stop <- struct{}{} - - // Clear out the setBuf channel. -loop: - for { - select { - case i := <-c.setBuf: - if i.wg != nil { - i.wg.Done() - continue - } - if i.flag != itemUpdate { - // In itemUpdate, the value is already set in the store. So, no need to call - // onEvict here. - c.onEvict(i) - } - default: - break loop - } - } - - // Clear value hashmap and policy data. - c.policy.Clear() - c.store.Clear(c.onEvict) - // Only reset metrics if they're enabled. - if c.Metrics != nil { - c.Metrics.Clear() - } - // Restart processItems goroutine. - go c.processItems() -} - -// MaxCost returns the max cost of the cache. -func (c *Cache) MaxCost() int64 { - if c == nil { - return 0 - } - return c.policy.MaxCost() -} - -// UpdateMaxCost updates the maxCost of an existing cache. -func (c *Cache) UpdateMaxCost(maxCost int64) { - if c == nil { - return - } - c.policy.UpdateMaxCost(maxCost) -} - -// processItems is ran by goroutines processing the Set buffer. -func (c *Cache) processItems() { - startTs := make(map[uint64]time.Time) - numToKeep := 100000 // TODO: Make this configurable via options. - - trackAdmission := func(key uint64) { - if c.Metrics == nil { - return - } - startTs[key] = time.Now() - if len(startTs) > numToKeep { - for k := range startTs { - if len(startTs) <= numToKeep { - break - } - delete(startTs, k) - } - } - } - onEvict := func(i *Item) { - if ts, has := startTs[i.Key]; has { - c.Metrics.trackEviction(int64(time.Since(ts) / time.Second)) - delete(startTs, i.Key) - } - if c.onEvict != nil { - c.onEvict(i) - } - } - - for { - select { - case i := <-c.setBuf: - if i.wg != nil { - i.wg.Done() - continue - } - // Calculate item cost value if new or update. - if i.Cost == 0 && c.cost != nil && i.flag != itemDelete { - i.Cost = c.cost(i.Value) - } - if !c.ignoreInternalCost { - // Add the cost of internally storing the object. - i.Cost += itemSize - } - - switch i.flag { - case itemNew: - victims, added := c.policy.Add(i.Key, i.Cost) - if added { - c.store.Set(i) - c.Metrics.add(keyAdd, i.Key, 1) - trackAdmission(i.Key) - } else { - c.onReject(i) - } - for _, victim := range victims { - victim.Conflict, victim.Value = c.store.Del(victim.Key, 0) - onEvict(victim) - } - - case itemUpdate: - c.policy.Update(i.Key, i.Cost) - - case itemDelete: - c.policy.Del(i.Key) // Deals with metrics updates. - _, val := c.store.Del(i.Key, i.Conflict) - c.onExit(val) - } - case <-c.cleanupTicker.C: - c.store.Cleanup(c.policy, onEvict) - case <-c.stop: - return - } - } -} - -// collectMetrics just creates a new *Metrics instance and adds the pointers -// to the cache and policy instances. -func (c *Cache) collectMetrics() { - c.Metrics = newMetrics() - c.policy.CollectMetrics(c.Metrics) -} - -type metricType int - -const ( - // The following 2 keep track of hits and misses. - hit = iota - miss - // The following 3 keep track of number of keys added, updated and evicted. - keyAdd - keyUpdate - keyEvict - // The following 2 keep track of cost of keys added and evicted. 
- costAdd - costEvict - // The following keep track of how many sets were dropped or rejected later. - dropSets - rejectSets - // The following 2 keep track of how many gets were kept and dropped on the - // floor. - dropGets - keepGets - // This should be the final enum. Other enums should be set before this. - doNotUse -) - -func stringFor(t metricType) string { - switch t { - case hit: - return "hit" - case miss: - return "miss" - case keyAdd: - return "keys-added" - case keyUpdate: - return "keys-updated" - case keyEvict: - return "keys-evicted" - case costAdd: - return "cost-added" - case costEvict: - return "cost-evicted" - case dropSets: - return "sets-dropped" - case rejectSets: - return "sets-rejected" // by policy. - case dropGets: - return "gets-dropped" - case keepGets: - return "gets-kept" - default: - return "unidentified" - } -} - -// Metrics is a snapshot of performance statistics for the lifetime of a cache instance. -type Metrics struct { - all [doNotUse][]*uint64 - - mu sync.RWMutex - life *z.HistogramData // Tracks the life expectancy of a key. -} - -func newMetrics() *Metrics { - s := &Metrics{ - life: z.NewHistogramData(z.HistogramBounds(1, 16)), - } - for i := 0; i < doNotUse; i++ { - s.all[i] = make([]*uint64, 256) - slice := s.all[i] - for j := range slice { - slice[j] = new(uint64) - } - } - return s -} - -func (p *Metrics) add(t metricType, hash, delta uint64) { - if p == nil { - return - } - valp := p.all[t] - // Avoid false sharing by padding at least 64 bytes of space between two - // atomic counters which would be incremented. - idx := (hash % 25) * 10 - atomic.AddUint64(valp[idx], delta) -} - -func (p *Metrics) get(t metricType) uint64 { - if p == nil { - return 0 - } - valp := p.all[t] - var total uint64 - for i := range valp { - total += atomic.LoadUint64(valp[i]) - } - return total -} - -// Hits is the number of Get calls where a value was found for the corresponding key. -func (p *Metrics) Hits() uint64 { - return p.get(hit) -} - -// Misses is the number of Get calls where a value was not found for the corresponding key. -func (p *Metrics) Misses() uint64 { - return p.get(miss) -} - -// KeysAdded is the total number of Set calls where a new key-value item was added. -func (p *Metrics) KeysAdded() uint64 { - return p.get(keyAdd) -} - -// KeysUpdated is the total number of Set calls where the value was updated. -func (p *Metrics) KeysUpdated() uint64 { - return p.get(keyUpdate) -} - -// KeysEvicted is the total number of keys evicted. -func (p *Metrics) KeysEvicted() uint64 { - return p.get(keyEvict) -} - -// CostAdded is the sum of costs that have been added (successful Set calls). -func (p *Metrics) CostAdded() uint64 { - return p.get(costAdd) -} - -// CostEvicted is the sum of all costs that have been evicted. -func (p *Metrics) CostEvicted() uint64 { - return p.get(costEvict) -} - -// SetsDropped is the number of Set calls that don't make it into internal -// buffers (due to contention or some other reason). -func (p *Metrics) SetsDropped() uint64 { - return p.get(dropSets) -} - -// SetsRejected is the number of Set calls rejected by the policy (TinyLFU). -func (p *Metrics) SetsRejected() uint64 { - return p.get(rejectSets) -} - -// GetsDropped is the number of Get counter increments that are dropped -// internally. -func (p *Metrics) GetsDropped() uint64 { - return p.get(dropGets) -} - -// GetsKept is the number of Get counter increments that are kept. 
-func (p *Metrics) GetsKept() uint64 { - return p.get(keepGets) -} - -// Ratio is the number of Hits over all accesses (Hits + Misses). This is the -// percentage of successful Get calls. -func (p *Metrics) Ratio() float64 { - if p == nil { - return 0.0 - } - hits, misses := p.get(hit), p.get(miss) - if hits == 0 && misses == 0 { - return 0.0 - } - return float64(hits) / float64(hits+misses) -} - -func (p *Metrics) trackEviction(numSeconds int64) { - if p == nil { - return - } - p.mu.Lock() - defer p.mu.Unlock() - p.life.Update(numSeconds) -} - -func (p *Metrics) LifeExpectancySeconds() *z.HistogramData { - if p == nil { - return nil - } - p.mu.RLock() - defer p.mu.RUnlock() - return p.life.Copy() -} - -// Clear resets all the metrics. -func (p *Metrics) Clear() { - if p == nil { - return - } - for i := 0; i < doNotUse; i++ { - for j := range p.all[i] { - atomic.StoreUint64(p.all[i][j], 0) - } - } - p.mu.Lock() - p.life = z.NewHistogramData(z.HistogramBounds(1, 16)) - p.mu.Unlock() -} - -// String returns a string representation of the metrics. -func (p *Metrics) String() string { - if p == nil { - return "" - } - var buf bytes.Buffer - for i := 0; i < doNotUse; i++ { - t := metricType(i) - fmt.Fprintf(&buf, "%s: %d ", stringFor(t), p.get(t)) - } - fmt.Fprintf(&buf, "gets-total: %d ", p.get(hit)+p.get(miss)) - fmt.Fprintf(&buf, "hit-ratio: %.2f", p.Ratio()) - return buf.String() -} diff --git a/vendor/github.com/dgraph-io/ristretto/policy.go b/vendor/github.com/dgraph-io/ristretto/policy.go deleted file mode 100644 index bf23f91..0000000 --- a/vendor/github.com/dgraph-io/ristretto/policy.go +++ /dev/null @@ -1,423 +0,0 @@ -/* - * Copyright 2020 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package ristretto - -import ( - "math" - "sync" - "sync/atomic" - - "github.com/dgraph-io/ristretto/z" -) - -const ( - // lfuSample is the number of items to sample when looking at eviction - // candidates. 5 seems to be the most optimal number [citation needed]. - lfuSample = 5 -) - -// policy is the interface encapsulating eviction/admission behavior. -// -// TODO: remove this interface and just rename defaultPolicy to policy, as we -// are probably only going to use/implement/maintain one policy. -type policy interface { - ringConsumer - // Add attempts to Add the key-cost pair to the Policy. It returns a slice - // of evicted keys and a bool denoting whether or not the key-cost pair - // was added. If it returns true, the key should be stored in cache. - Add(uint64, int64) ([]*Item, bool) - // Has returns true if the key exists in the Policy. - Has(uint64) bool - // Del deletes the key from the Policy. - Del(uint64) - // Cap returns the available capacity. - Cap() int64 - // Close stops all goroutines and closes all channels. - Close() - // Update updates the cost value for the key. - Update(uint64, int64) - // Cost returns the cost value of a key or -1 if missing. 
- Cost(uint64) int64 - // Optionally, set stats object to track how policy is performing. - CollectMetrics(*Metrics) - // Clear zeroes out all counters and clears hashmaps. - Clear() - // MaxCost returns the current max cost of the cache policy. - MaxCost() int64 - // UpdateMaxCost updates the max cost of the cache policy. - UpdateMaxCost(int64) -} - -func newPolicy(numCounters, maxCost int64) policy { - return newDefaultPolicy(numCounters, maxCost) -} - -type defaultPolicy struct { - sync.Mutex - admit *tinyLFU - evict *sampledLFU - itemsCh chan []uint64 - stop chan struct{} - isClosed bool - metrics *Metrics -} - -func newDefaultPolicy(numCounters, maxCost int64) *defaultPolicy { - p := &defaultPolicy{ - admit: newTinyLFU(numCounters), - evict: newSampledLFU(maxCost), - itemsCh: make(chan []uint64, 3), - stop: make(chan struct{}), - } - go p.processItems() - return p -} - -func (p *defaultPolicy) CollectMetrics(metrics *Metrics) { - p.metrics = metrics - p.evict.metrics = metrics -} - -type policyPair struct { - key uint64 - cost int64 -} - -func (p *defaultPolicy) processItems() { - for { - select { - case items := <-p.itemsCh: - p.Lock() - p.admit.Push(items) - p.Unlock() - case <-p.stop: - return - } - } -} - -func (p *defaultPolicy) Push(keys []uint64) bool { - if p.isClosed { - return false - } - - if len(keys) == 0 { - return true - } - - select { - case p.itemsCh <- keys: - p.metrics.add(keepGets, keys[0], uint64(len(keys))) - return true - default: - p.metrics.add(dropGets, keys[0], uint64(len(keys))) - return false - } -} - -// Add decides whether the item with the given key and cost should be accepted by -// the policy. It returns the list of victims that have been evicted and a boolean -// indicating whether the incoming item should be accepted. -func (p *defaultPolicy) Add(key uint64, cost int64) ([]*Item, bool) { - p.Lock() - defer p.Unlock() - - // Cannot add an item bigger than entire cache. - if cost > p.evict.getMaxCost() { - return nil, false - } - - // No need to go any further if the item is already in the cache. - if has := p.evict.updateIfHas(key, cost); has { - // An update does not count as an addition, so return false. - return nil, false - } - - // If the execution reaches this point, the key doesn't exist in the cache. - // Calculate the remaining room in the cache (usually bytes). - room := p.evict.roomLeft(cost) - if room >= 0 { - // There's enough room in the cache to store the new item without - // overflowing. Do that now and stop here. - p.evict.add(key, cost) - p.metrics.add(costAdd, key, uint64(cost)) - return nil, true - } - - // incHits is the hit count for the incoming item. - incHits := p.admit.Estimate(key) - // sample is the eviction candidate pool to be filled via random sampling. - // TODO: perhaps we should use a min heap here. Right now our time - // complexity is N for finding the min. Min heap should bring it down to - // O(lg N). - sample := make([]*policyPair, 0, lfuSample) - // As items are evicted they will be appended to victims. - victims := make([]*Item, 0) - - // Delete victims until there's enough space or a minKey is found that has - // more hits than incoming item. - for ; room < 0; room = p.evict.roomLeft(cost) { - // Fill up empty slots in sample. - sample = p.evict.fillSample(sample) - - // Find minimally used item in sample. - minKey, minHits, minId, minCost := uint64(0), int64(math.MaxInt64), 0, int64(0) - for i, pair := range sample { - // Look up hit count for sample key. 
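
The policy's `Push` above forwards access batches over a small buffered channel and simply drops a batch when the channel is full, trading a little accuracy for never blocking a `Get`. A stripped-down sketch of that lossy hand-off, under my own names (counters and constructor are illustrative, not the vendored API):

```go
package main

import (
	"fmt"
	"sync/atomic"
)

type lossyFeeder struct {
	itemsCh chan []uint64
	kept    uint64
	dropped uint64
}

func newLossyFeeder() *lossyFeeder {
	return &lossyFeeder{itemsCh: make(chan []uint64, 3)}
}

// Push never blocks: if the consumer is behind, the batch is dropped and
// only a counter records the loss.
func (f *lossyFeeder) Push(keys []uint64) bool {
	if len(keys) == 0 {
		return true
	}
	select {
	case f.itemsCh <- keys:
		atomic.AddUint64(&f.kept, uint64(len(keys)))
		return true
	default:
		atomic.AddUint64(&f.dropped, uint64(len(keys)))
		return false
	}
}

func main() {
	f := newLossyFeeder()
	for i := 0; i < 10; i++ {
		f.Push([]uint64{uint64(i)}) // no consumer: only the first 3 batches fit
	}
	fmt.Println("kept:", f.kept, "dropped:", f.dropped) // kept: 3 dropped: 7
}
```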
- if hits := p.admit.Estimate(pair.key); hits < minHits { - minKey, minHits, minId, minCost = pair.key, hits, i, pair.cost - } - } - - // If the incoming item isn't worth keeping in the policy, reject. - if incHits < minHits { - p.metrics.add(rejectSets, key, 1) - return victims, false - } - - // Delete the victim from metadata. - p.evict.del(minKey) - - // Delete the victim from sample. - sample[minId] = sample[len(sample)-1] - sample = sample[:len(sample)-1] - // Store victim in evicted victims slice. - victims = append(victims, &Item{ - Key: minKey, - Conflict: 0, - Cost: minCost, - }) - } - - p.evict.add(key, cost) - p.metrics.add(costAdd, key, uint64(cost)) - return victims, true -} - -func (p *defaultPolicy) Has(key uint64) bool { - p.Lock() - _, exists := p.evict.keyCosts[key] - p.Unlock() - return exists -} - -func (p *defaultPolicy) Del(key uint64) { - p.Lock() - p.evict.del(key) - p.Unlock() -} - -func (p *defaultPolicy) Cap() int64 { - p.Lock() - capacity := int64(p.evict.getMaxCost() - p.evict.used) - p.Unlock() - return capacity -} - -func (p *defaultPolicy) Update(key uint64, cost int64) { - p.Lock() - p.evict.updateIfHas(key, cost) - p.Unlock() -} - -func (p *defaultPolicy) Cost(key uint64) int64 { - p.Lock() - if cost, found := p.evict.keyCosts[key]; found { - p.Unlock() - return cost - } - p.Unlock() - return -1 -} - -func (p *defaultPolicy) Clear() { - p.Lock() - p.admit.clear() - p.evict.clear() - p.Unlock() -} - -func (p *defaultPolicy) Close() { - if p.isClosed { - return - } - - // Block until the p.processItems goroutine returns. - p.stop <- struct{}{} - close(p.stop) - close(p.itemsCh) - p.isClosed = true -} - -func (p *defaultPolicy) MaxCost() int64 { - if p == nil || p.evict == nil { - return 0 - } - return p.evict.getMaxCost() -} - -func (p *defaultPolicy) UpdateMaxCost(maxCost int64) { - if p == nil || p.evict == nil { - return - } - p.evict.updateMaxCost(maxCost) -} - -// sampledLFU is an eviction helper storing key-cost pairs. -type sampledLFU struct { - // NOTE: align maxCost to 64-bit boundary for use with atomic. - // As per https://golang.org/pkg/sync/atomic/: "On ARM, x86-32, - // and 32-bit MIPS, it is the caller’s responsibility to arrange - // for 64-bit alignment of 64-bit words accessed atomically. - // The first word in a variable or in an allocated struct, array, - // or slice can be relied upon to be 64-bit aligned." 
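
The `Add` method above implements sampled LFU admission: it samples a handful of resident keys, evicts the least-frequently-used one until the newcomer fits, and rejects the newcomer outright if even the coldest sampled victim is hotter. A hedged sketch of that decision loop over a plain map, where the `estimate` callback stands in for the TinyLFU sketch and all names are mine:

```go
package main

import "fmt"

// admit decides whether a new item of the given cost may enter a cache whose
// residents are tracked in keyCosts, evicting cold keys as needed. estimate
// returns an approximate access frequency for a key. It returns the evicted
// keys and whether the newcomer was accepted.
func admit(keyCosts map[uint64]int64, used *int64, maxCost int64,
	key uint64, cost int64, estimate func(uint64) int64) ([]uint64, bool) {

	if cost > maxCost {
		return nil, false // bigger than the whole cache
	}
	var victims []uint64
	incHits := estimate(key)
	for *used+cost > maxCost {
		// Sample up to 5 resident keys and find the coldest one.
		minKey, minHits := uint64(0), int64(1<<62)
		sampled := 0
		for k := range keyCosts {
			if h := estimate(k); h < minHits {
				minKey, minHits = k, h
			}
			if sampled++; sampled == 5 {
				break
			}
		}
		if incHits < minHits {
			return victims, false // newcomer is colder than every victim: reject
		}
		*used -= keyCosts[minKey]
		delete(keyCosts, minKey)
		victims = append(victims, minKey)
	}
	keyCosts[key] = cost
	*used += cost
	return victims, true
}

func main() {
	costs := map[uint64]int64{1: 4, 2: 4}
	used := int64(8)
	freq := func(k uint64) int64 { return int64(k) } // key 3 is "hotter" than 1 and 2
	victims, ok := admit(costs, &used, 10, 3, 4, freq)
	fmt.Println(victims, ok, used) // [1] true 8
}
```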
- maxCost int64 - used int64 - metrics *Metrics - keyCosts map[uint64]int64 -} - -func newSampledLFU(maxCost int64) *sampledLFU { - return &sampledLFU{ - keyCosts: make(map[uint64]int64), - maxCost: maxCost, - } -} - -func (p *sampledLFU) getMaxCost() int64 { - return atomic.LoadInt64(&p.maxCost) -} - -func (p *sampledLFU) updateMaxCost(maxCost int64) { - atomic.StoreInt64(&p.maxCost, maxCost) -} - -func (p *sampledLFU) roomLeft(cost int64) int64 { - return p.getMaxCost() - (p.used + cost) -} - -func (p *sampledLFU) fillSample(in []*policyPair) []*policyPair { - if len(in) >= lfuSample { - return in - } - for key, cost := range p.keyCosts { - in = append(in, &policyPair{key, cost}) - if len(in) >= lfuSample { - return in - } - } - return in -} - -func (p *sampledLFU) del(key uint64) { - cost, ok := p.keyCosts[key] - if !ok { - return - } - p.used -= cost - delete(p.keyCosts, key) - p.metrics.add(costEvict, key, uint64(cost)) - p.metrics.add(keyEvict, key, 1) -} - -func (p *sampledLFU) add(key uint64, cost int64) { - p.keyCosts[key] = cost - p.used += cost -} - -func (p *sampledLFU) updateIfHas(key uint64, cost int64) bool { - if prev, found := p.keyCosts[key]; found { - // Update the cost of an existing key, but don't worry about evicting. - // Evictions will be handled the next time a new item is added. - p.metrics.add(keyUpdate, key, 1) - if prev > cost { - diff := prev - cost - p.metrics.add(costAdd, key, ^uint64(uint64(diff)-1)) - } else if cost > prev { - diff := cost - prev - p.metrics.add(costAdd, key, uint64(diff)) - } - p.used += cost - prev - p.keyCosts[key] = cost - return true - } - return false -} - -func (p *sampledLFU) clear() { - p.used = 0 - p.keyCosts = make(map[uint64]int64) -} - -// tinyLFU is an admission helper that keeps track of access frequency using -// tiny (4-bit) counters in the form of a count-min sketch. -// tinyLFU is NOT thread safe. -type tinyLFU struct { - freq *cmSketch - door *z.Bloom - incrs int64 - resetAt int64 -} - -func newTinyLFU(numCounters int64) *tinyLFU { - return &tinyLFU{ - freq: newCmSketch(numCounters), - door: z.NewBloomFilter(float64(numCounters), 0.01), - resetAt: numCounters, - } -} - -func (p *tinyLFU) Push(keys []uint64) { - for _, key := range keys { - p.Increment(key) - } -} - -func (p *tinyLFU) Estimate(key uint64) int64 { - hits := p.freq.Estimate(key) - if p.door.Has(key) { - hits++ - } - return hits -} - -func (p *tinyLFU) Increment(key uint64) { - // Flip doorkeeper bit if not already done. - if added := p.door.AddIfNotHas(key); !added { - // Increment count-min counter if doorkeeper bit is already set. - p.freq.Increment(key) - } - p.incrs++ - if p.incrs >= p.resetAt { - p.reset() - } -} - -func (p *tinyLFU) reset() { - // Zero out incrs. - p.incrs = 0 - // clears doorkeeper bits - p.door.Clear() - // halves count-min counters - p.freq.Reset() -} - -func (p *tinyLFU) clear() { - p.incrs = 0 - p.door.Clear() - p.freq.Clear() -} diff --git a/vendor/github.com/dgraph-io/ristretto/ring.go b/vendor/github.com/dgraph-io/ristretto/ring.go deleted file mode 100644 index 5dbed4c..0000000 --- a/vendor/github.com/dgraph-io/ristretto/ring.go +++ /dev/null @@ -1,91 +0,0 @@ -/* - * Copyright 2019 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
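
The `tinyLFU` code removed above combines a doorkeeper Bloom filter with a count-min sketch: the first access only flips a doorkeeper bit, repeat accesses increment the sketch, `Estimate` adds the doorkeeper bit back on top, and after `resetAt` increments everything is halved so stale popularity ages out. A toy version using exact maps instead of the probabilistic structures, purely to show the control flow:

```go
package main

import "fmt"

// toyTinyLFU mimics ristretto's admission frequency tracker with exact maps;
// the real code uses a Bloom filter doorkeeper and 4-bit count-min counters.
type toyTinyLFU struct {
	door    map[uint64]bool  // "seen at least once"
	freq    map[uint64]int64 // accesses beyond the first
	incrs   int64
	resetAt int64
}

func newToyTinyLFU(resetAt int64) *toyTinyLFU {
	return &toyTinyLFU{door: map[uint64]bool{}, freq: map[uint64]int64{}, resetAt: resetAt}
}

func (t *toyTinyLFU) Increment(key uint64) {
	if t.door[key] {
		t.freq[key]++ // doorkeeper already set: count it
	} else {
		t.door[key] = true // first sighting only flips the doorkeeper
	}
	if t.incrs++; t.incrs >= t.resetAt {
		t.reset()
	}
}

func (t *toyTinyLFU) Estimate(key uint64) int64 {
	hits := t.freq[key]
	if t.door[key] {
		hits++
	}
	return hits
}

// reset halves the counters and clears the doorkeeper, so old popularity decays.
func (t *toyTinyLFU) reset() {
	t.incrs = 0
	t.door = map[uint64]bool{}
	for k := range t.freq {
		t.freq[k] /= 2
	}
}

func main() {
	lfu := newToyTinyLFU(100)
	for i := 0; i < 5; i++ {
		lfu.Increment(42)
	}
	fmt.Println(lfu.Estimate(42), lfu.Estimate(7)) // 5 0
}
```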
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package ristretto - -import ( - "sync" -) - -// ringConsumer is the user-defined object responsible for receiving and -// processing items in batches when buffers are drained. -type ringConsumer interface { - Push([]uint64) bool -} - -// ringStripe is a singular ring buffer that is not concurrent safe. -type ringStripe struct { - cons ringConsumer - data []uint64 - capa int -} - -func newRingStripe(cons ringConsumer, capa int64) *ringStripe { - return &ringStripe{ - cons: cons, - data: make([]uint64, 0, capa), - capa: int(capa), - } -} - -// Push appends an item in the ring buffer and drains (copies items and -// sends to Consumer) if full. -func (s *ringStripe) Push(item uint64) { - s.data = append(s.data, item) - // Decide if the ring buffer should be drained. - if len(s.data) >= s.capa { - // Send elements to consumer and create a new ring stripe. - if s.cons.Push(s.data) { - s.data = make([]uint64, 0, s.capa) - } else { - s.data = s.data[:0] - } - } -} - -// ringBuffer stores multiple buffers (stripes) and distributes Pushed items -// between them to lower contention. -// -// This implements the "batching" process described in the BP-Wrapper paper -// (section III part A). -type ringBuffer struct { - pool *sync.Pool -} - -// newRingBuffer returns a striped ring buffer. The Consumer in ringConfig will -// be called when individual stripes are full and need to drain their elements. -func newRingBuffer(cons ringConsumer, capa int64) *ringBuffer { - // LOSSY buffers use a very simple sync.Pool for concurrently reusing - // stripes. We do lose some stripes due to GC (unheld items in sync.Pool - // are cleared), but the performance gains generally outweigh the small - // percentage of elements lost. The performance primarily comes from - // low-level runtime functions used in the standard library that aren't - // available to us (such as runtime_procPin()). - return &ringBuffer{ - pool: &sync.Pool{ - New: func() interface{} { return newRingStripe(cons, capa) }, - }, - } -} - -// Push adds an element to one of the internal stripes and possibly drains if -// the stripe becomes full. -func (b *ringBuffer) Push(item uint64) { - // Reuse or create a new stripe. - stripe := b.pool.Get().(*ringStripe) - stripe.Push(item) - b.pool.Put(stripe) -} diff --git a/vendor/github.com/dgraph-io/ristretto/sketch.go b/vendor/github.com/dgraph-io/ristretto/sketch.go deleted file mode 100644 index 6368d2b..0000000 --- a/vendor/github.com/dgraph-io/ristretto/sketch.go +++ /dev/null @@ -1,156 +0,0 @@ -/* - * Copyright 2019 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
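
The removed `ring.go` lowers contention by letting each goroutine borrow a stripe from a `sync.Pool`, append into it, and hand the full batch to a consumer; stripes occasionally lost to GC are an accepted cost. A minimal sketch of that pattern under my own names (the `printConsumer` exists only for the demo):

```go
package main

import (
	"fmt"
	"sync"
)

type consumer interface{ Push([]uint64) bool }

type stripe struct {
	cons consumer
	data []uint64
	capa int
}

// push buffers one item and drains the stripe to the consumer once it is full.
func (s *stripe) push(item uint64) {
	s.data = append(s.data, item)
	if len(s.data) >= s.capa {
		if s.cons.Push(s.data) {
			s.data = make([]uint64, 0, s.capa) // batch handed off; start fresh
		} else {
			s.data = s.data[:0] // consumer dropped it; reuse the backing array
		}
	}
}

type stripedBuffer struct{ pool *sync.Pool }

func newStripedBuffer(cons consumer, capa int) *stripedBuffer {
	return &stripedBuffer{pool: &sync.Pool{
		New: func() any { return &stripe{cons: cons, capa: capa, data: make([]uint64, 0, capa)} },
	}}
}

// Push borrows a stripe, appends, and returns it, so concurrent callers
// usually write to different stripes instead of fighting over one buffer.
func (b *stripedBuffer) Push(item uint64) {
	s := b.pool.Get().(*stripe)
	s.push(item)
	b.pool.Put(s)
}

type printConsumer struct{}

func (printConsumer) Push(batch []uint64) bool { fmt.Println("drained", batch); return true }

func main() {
	b := newStripedBuffer(printConsumer{}, 4)
	for i := uint64(1); i <= 8; i++ {
		b.Push(i)
	}
}
```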
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -// This package includes multiple probabalistic data structures needed for -// admission/eviction metadata. Most are Counting Bloom Filter variations, but -// a caching-specific feature that is also required is a "freshness" mechanism, -// which basically serves as a "lifetime" process. This freshness mechanism -// was described in the original TinyLFU paper [1], but other mechanisms may -// be better suited for certain data distributions. -// -// [1]: https://arxiv.org/abs/1512.00727 -package ristretto - -import ( - "fmt" - "math/rand" - "time" -) - -// cmSketch is a Count-Min sketch implementation with 4-bit counters, heavily -// based on Damian Gryski's CM4 [1]. -// -// [1]: https://github.com/dgryski/go-tinylfu/blob/master/cm4.go -type cmSketch struct { - rows [cmDepth]cmRow - seed [cmDepth]uint64 - mask uint64 -} - -const ( - // cmDepth is the number of counter copies to store (think of it as rows). - cmDepth = 4 -) - -func newCmSketch(numCounters int64) *cmSketch { - if numCounters == 0 { - panic("cmSketch: bad numCounters") - } - // Get the next power of 2 for better cache performance. - numCounters = next2Power(numCounters) - sketch := &cmSketch{mask: uint64(numCounters - 1)} - // Initialize rows of counters and seeds. - // Cryptographic precision not needed - source := rand.New(rand.NewSource(time.Now().UnixNano())) //nolint:gosec - for i := 0; i < cmDepth; i++ { - sketch.seed[i] = source.Uint64() - sketch.rows[i] = newCmRow(numCounters) - } - return sketch -} - -// Increment increments the count(ers) for the specified key. -func (s *cmSketch) Increment(hashed uint64) { - for i := range s.rows { - s.rows[i].increment((hashed ^ s.seed[i]) & s.mask) - } -} - -// Estimate returns the value of the specified key. -func (s *cmSketch) Estimate(hashed uint64) int64 { - min := byte(255) - for i := range s.rows { - val := s.rows[i].get((hashed ^ s.seed[i]) & s.mask) - if val < min { - min = val - } - } - return int64(min) -} - -// Reset halves all counter values. -func (s *cmSketch) Reset() { - for _, r := range s.rows { - r.reset() - } -} - -// Clear zeroes all counters. -func (s *cmSketch) Clear() { - for _, r := range s.rows { - r.clear() - } -} - -// cmRow is a row of bytes, with each byte holding two counters. -type cmRow []byte - -func newCmRow(numCounters int64) cmRow { - return make(cmRow, numCounters/2) -} - -func (r cmRow) get(n uint64) byte { - return byte(r[n/2]>>((n&1)*4)) & 0x0f -} - -func (r cmRow) increment(n uint64) { - // Index of the counter. - i := n / 2 - // Shift distance (even 0, odd 4). - s := (n & 1) * 4 - // Counter value. - v := (r[i] >> s) & 0x0f - // Only increment if not max value (overflow wrap is bad for LFU). - if v < 15 { - r[i] += 1 << s - } -} - -func (r cmRow) reset() { - // Halve each counter. - for i := range r { - r[i] = (r[i] >> 1) & 0x77 - } -} - -func (r cmRow) clear() { - // Zero each counter. - for i := range r { - r[i] = 0 - } -} - -func (r cmRow) string() string { - s := "" - for i := uint64(0); i < uint64(len(r)*2); i++ { - s += fmt.Sprintf("%02d ", (r[(i/2)]>>((i&1)*4))&0x0f) - } - s = s[:len(s)-1] - return s -} - -// next2Power rounds x up to the next power of 2, if it's not already one. 
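
The count-min sketch above packs two 4-bit counters into every byte: `n/2` selects the byte, `(n&1)*4` selects the nibble, increments saturate at 15, and the reset pass halves all counters with one shift-and-mask per byte. A small self-contained illustration of that nibble arithmetic (same bit tricks, my own type name):

```go
package main

import "fmt"

// nibbleRow stores two 4-bit counters per byte, like the vendored cmRow.
type nibbleRow []byte

func newNibbleRow(counters int) nibbleRow { return make(nibbleRow, counters/2) }

func (r nibbleRow) get(n uint64) byte { return (r[n/2] >> ((n & 1) * 4)) & 0x0f }

// increment saturates at 15 so the counter never wraps back to zero.
func (r nibbleRow) increment(n uint64) {
	i, s := n/2, (n&1)*4
	if (r[i]>>s)&0x0f < 15 {
		r[i] += 1 << s
	}
}

// halve divides every counter by two in a single pass; the 0x77 mask clears
// the bit that would otherwise leak from a high nibble into its neighbour.
func (r nibbleRow) halve() {
	for i := range r {
		r[i] = (r[i] >> 1) & 0x77
	}
}

func main() {
	row := newNibbleRow(8)
	for i := 0; i < 20; i++ {
		row.increment(3) // saturates at 15
	}
	row.increment(2)
	fmt.Println(row.get(3), row.get(2)) // 15 1
	row.halve()
	fmt.Println(row.get(3), row.get(2)) // 7 0
}
```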
-func next2Power(x int64) int64 { - x-- - x |= x >> 1 - x |= x >> 2 - x |= x >> 4 - x |= x >> 8 - x |= x >> 16 - x |= x >> 32 - x++ - return x -} diff --git a/vendor/github.com/dgraph-io/ristretto/store.go b/vendor/github.com/dgraph-io/ristretto/store.go deleted file mode 100644 index e42a98b..0000000 --- a/vendor/github.com/dgraph-io/ristretto/store.go +++ /dev/null @@ -1,242 +0,0 @@ -/* - * Copyright 2019 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package ristretto - -import ( - "sync" - "time" -) - -// TODO: Do we need this to be a separate struct from Item? -type storeItem struct { - key uint64 - conflict uint64 - value interface{} - expiration time.Time -} - -// store is the interface fulfilled by all hash map implementations in this -// file. Some hash map implementations are better suited for certain data -// distributions than others, so this allows us to abstract that out for use -// in Ristretto. -// -// Every store is safe for concurrent usage. -type store interface { - // Get returns the value associated with the key parameter. - Get(uint64, uint64) (interface{}, bool) - // Expiration returns the expiration time for this key. - Expiration(uint64) time.Time - // Set adds the key-value pair to the Map or updates the value if it's - // already present. The key-value pair is passed as a pointer to an - // item object. - Set(*Item) - // Del deletes the key-value pair from the Map. - Del(uint64, uint64) (uint64, interface{}) - // Update attempts to update the key with a new value and returns true if - // successful. - Update(*Item) (interface{}, bool) - // Cleanup removes items that have an expired TTL. - Cleanup(policy policy, onEvict itemCallback) - // Clear clears all contents of the store. - Clear(onEvict itemCallback) -} - -// newStore returns the default store implementation. -func newStore() store { - return newShardedMap() -} - -const numShards uint64 = 256 - -type shardedMap struct { - shards []*lockedMap - expiryMap *expirationMap -} - -func newShardedMap() *shardedMap { - sm := &shardedMap{ - shards: make([]*lockedMap, int(numShards)), - expiryMap: newExpirationMap(), - } - for i := range sm.shards { - sm.shards[i] = newLockedMap(sm.expiryMap) - } - return sm -} - -func (sm *shardedMap) Get(key, conflict uint64) (interface{}, bool) { - return sm.shards[key%numShards].get(key, conflict) -} - -func (sm *shardedMap) Expiration(key uint64) time.Time { - return sm.shards[key%numShards].Expiration(key) -} - -func (sm *shardedMap) Set(i *Item) { - if i == nil { - // If item is nil make this Set a no-op. 
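
The removed `store.go` spreads items over 256 independently locked maps and routes every operation with `key % numShards`, so unrelated keys rarely contend on the same mutex. The routing itself is just this (a deliberately reduced sketch with string values and my own minimal types):

```go
package main

import (
	"fmt"
	"sync"
)

const numShards = 256

type shard struct {
	sync.RWMutex
	data map[uint64]string
}

type shardedMap struct{ shards [numShards]*shard }

func newShardedMap() *shardedMap {
	sm := &shardedMap{}
	for i := range sm.shards {
		sm.shards[i] = &shard{data: make(map[uint64]string)}
	}
	return sm
}

// Set and Get route to one shard by key modulo, so writers to different
// shards never block each other.
func (sm *shardedMap) Set(key uint64, val string) {
	s := sm.shards[key%numShards]
	s.Lock()
	s.data[key] = val
	s.Unlock()
}

func (sm *shardedMap) Get(key uint64) (string, bool) {
	s := sm.shards[key%numShards]
	s.RLock()
	v, ok := s.data[key]
	s.RUnlock()
	return v, ok
}

func main() {
	sm := newShardedMap()
	sm.Set(42, "answer")
	fmt.Println(sm.Get(42)) // answer true
	fmt.Println(sm.Get(7))  //  false
}
```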
- return - } - - sm.shards[i.Key%numShards].Set(i) -} - -func (sm *shardedMap) Del(key, conflict uint64) (uint64, interface{}) { - return sm.shards[key%numShards].Del(key, conflict) -} - -func (sm *shardedMap) Update(newItem *Item) (interface{}, bool) { - return sm.shards[newItem.Key%numShards].Update(newItem) -} - -func (sm *shardedMap) Cleanup(policy policy, onEvict itemCallback) { - sm.expiryMap.cleanup(sm, policy, onEvict) -} - -func (sm *shardedMap) Clear(onEvict itemCallback) { - for i := uint64(0); i < numShards; i++ { - sm.shards[i].Clear(onEvict) - } -} - -type lockedMap struct { - sync.RWMutex - data map[uint64]storeItem - em *expirationMap -} - -func newLockedMap(em *expirationMap) *lockedMap { - return &lockedMap{ - data: make(map[uint64]storeItem), - em: em, - } -} - -func (m *lockedMap) get(key, conflict uint64) (interface{}, bool) { - m.RLock() - item, ok := m.data[key] - m.RUnlock() - if !ok { - return nil, false - } - if conflict != 0 && (conflict != item.conflict) { - return nil, false - } - - // Handle expired items. - if !item.expiration.IsZero() && time.Now().After(item.expiration) { - return nil, false - } - return item.value, true -} - -func (m *lockedMap) Expiration(key uint64) time.Time { - m.RLock() - defer m.RUnlock() - return m.data[key].expiration -} - -func (m *lockedMap) Set(i *Item) { - if i == nil { - // If the item is nil make this Set a no-op. - return - } - - m.Lock() - defer m.Unlock() - item, ok := m.data[i.Key] - - if ok { - // The item existed already. We need to check the conflict key and reject the - // update if they do not match. Only after that the expiration map is updated. - if i.Conflict != 0 && (i.Conflict != item.conflict) { - return - } - m.em.update(i.Key, i.Conflict, item.expiration, i.Expiration) - } else { - // The value is not in the map already. There's no need to return anything. - // Simply add the expiration map. - m.em.add(i.Key, i.Conflict, i.Expiration) - } - - m.data[i.Key] = storeItem{ - key: i.Key, - conflict: i.Conflict, - value: i.Value, - expiration: i.Expiration, - } -} - -func (m *lockedMap) Del(key, conflict uint64) (uint64, interface{}) { - m.Lock() - item, ok := m.data[key] - if !ok { - m.Unlock() - return 0, nil - } - if conflict != 0 && (conflict != item.conflict) { - m.Unlock() - return 0, nil - } - - if !item.expiration.IsZero() { - m.em.del(key, item.expiration) - } - - delete(m.data, key) - m.Unlock() - return item.conflict, item.value -} - -func (m *lockedMap) Update(newItem *Item) (interface{}, bool) { - m.Lock() - item, ok := m.data[newItem.Key] - if !ok { - m.Unlock() - return nil, false - } - if newItem.Conflict != 0 && (newItem.Conflict != item.conflict) { - m.Unlock() - return nil, false - } - - m.em.update(newItem.Key, newItem.Conflict, item.expiration, newItem.Expiration) - m.data[newItem.Key] = storeItem{ - key: newItem.Key, - conflict: newItem.Conflict, - value: newItem.Value, - expiration: newItem.Expiration, - } - - m.Unlock() - return item.value, true -} - -func (m *lockedMap) Clear(onEvict itemCallback) { - m.Lock() - i := &Item{} - if onEvict != nil { - for _, si := range m.data { - i.Key = si.key - i.Conflict = si.conflict - i.Value = si.value - onEvict(i) - } - } - m.data = make(map[uint64]storeItem) - m.Unlock() -} diff --git a/vendor/github.com/dgraph-io/ristretto/ttl.go b/vendor/github.com/dgraph-io/ristretto/ttl.go deleted file mode 100644 index 337976a..0000000 --- a/vendor/github.com/dgraph-io/ristretto/ttl.go +++ /dev/null @@ -1,147 +0,0 @@ -/* - * Copyright 2020 Dgraph Labs, Inc. 
and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package ristretto - -import ( - "sync" - "time" -) - -var ( - // TODO: find the optimal value or make it configurable. - bucketDurationSecs = int64(5) -) - -func storageBucket(t time.Time) int64 { - return (t.Unix() / bucketDurationSecs) + 1 -} - -func cleanupBucket(t time.Time) int64 { - // The bucket to cleanup is always behind the storage bucket by one so that - // no elements in that bucket (which might not have expired yet) are deleted. - return storageBucket(t) - 1 -} - -// bucket type is a map of key to conflict. -type bucket map[uint64]uint64 - -// expirationMap is a map of bucket number to the corresponding bucket. -type expirationMap struct { - sync.RWMutex - buckets map[int64]bucket -} - -func newExpirationMap() *expirationMap { - return &expirationMap{ - buckets: make(map[int64]bucket), - } -} - -func (m *expirationMap) add(key, conflict uint64, expiration time.Time) { - if m == nil { - return - } - - // Items that don't expire don't need to be in the expiration map. - if expiration.IsZero() { - return - } - - bucketNum := storageBucket(expiration) - m.Lock() - defer m.Unlock() - - b, ok := m.buckets[bucketNum] - if !ok { - b = make(bucket) - m.buckets[bucketNum] = b - } - b[key] = conflict -} - -func (m *expirationMap) update(key, conflict uint64, oldExpTime, newExpTime time.Time) { - if m == nil { - return - } - - m.Lock() - defer m.Unlock() - - oldBucketNum := storageBucket(oldExpTime) - oldBucket, ok := m.buckets[oldBucketNum] - if ok { - delete(oldBucket, key) - } - - newBucketNum := storageBucket(newExpTime) - newBucket, ok := m.buckets[newBucketNum] - if !ok { - newBucket = make(bucket) - m.buckets[newBucketNum] = newBucket - } - newBucket[key] = conflict -} - -func (m *expirationMap) del(key uint64, expiration time.Time) { - if m == nil { - return - } - - bucketNum := storageBucket(expiration) - m.Lock() - defer m.Unlock() - _, ok := m.buckets[bucketNum] - if !ok { - return - } - delete(m.buckets[bucketNum], key) -} - -// cleanup removes all the items in the bucket that was just completed. It deletes -// those items from the store, and calls the onEvict function on those items. -// This function is meant to be called periodically. -func (m *expirationMap) cleanup(store store, policy policy, onEvict itemCallback) { - if m == nil { - return - } - - m.Lock() - now := time.Now() - bucketNum := cleanupBucket(now) - keys := m.buckets[bucketNum] - delete(m.buckets, bucketNum) - m.Unlock() - - for key, conflict := range keys { - // Sanity check. Verify that the store agrees that this key is expired. 
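
The removed `ttl.go` groups expirations into 5-second buckets: an item whose deadline falls at Unix time `t` is filed under bucket `t/5 + 1`, and the periodic cleanup pass only empties the bucket *behind* the current one, so items that might not have expired yet are never touched. A small sketch of that bucketing math (bucket width copied from the code; the demo map and key names are mine):

```go
package main

import (
	"fmt"
	"time"
)

const bucketSecs = 5

// storageBucket says which bucket an expiration time belongs to.
func storageBucket(t time.Time) int64 { return t.Unix()/bucketSecs + 1 }

// cleanupBucket is always one behind the bucket currently being written to,
// so a cleanup pass can never delete something that has not expired yet.
func cleanupBucket(now time.Time) int64 { return storageBucket(now) - 1 }

func main() {
	buckets := map[int64][]string{}
	now := time.Now()

	// Two items expiring 2s and 9s from now land in different buckets.
	for _, it := range []struct {
		key string
		ttl time.Duration
	}{{"a", 2 * time.Second}, {"b", 9 * time.Second}} {
		b := storageBucket(now.Add(it.ttl))
		buckets[b] = append(buckets[b], it.key)
	}

	// A cleanup pass 7 seconds later removes the bucket holding "a" only.
	later := now.Add(7 * time.Second)
	expired := buckets[cleanupBucket(later)]
	delete(buckets, cleanupBucket(later))
	fmt.Println("expired:", expired, "remaining buckets:", len(buckets))
}
```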
- if store.Expiration(key).After(now) { - continue - } - - cost := policy.Cost(key) - policy.Del(key) - _, value := store.Del(key, conflict) - - if onEvict != nil { - onEvict(&Item{Key: key, - Conflict: conflict, - Value: value, - Cost: cost, - }) - } - } -} diff --git a/vendor/github.com/dgraph-io/ristretto/z/LICENSE b/vendor/github.com/dgraph-io/ristretto/z/LICENSE deleted file mode 100644 index 0860cbf..0000000 --- a/vendor/github.com/dgraph-io/ristretto/z/LICENSE +++ /dev/null @@ -1,64 +0,0 @@ -bbloom.go - -// The MIT License (MIT) -// Copyright (c) 2014 Andreas Briese, eduToolbox@Bri-C GmbH, Sarstedt - -// Permission is hereby granted, free of charge, to any person obtaining a copy of -// this software and associated documentation files (the "Software"), to deal in -// the Software without restriction, including without limitation the rights to -// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software is furnished to do so, -// subject to the following conditions: - -// The above copyright notice and this permission notice shall be included in all -// copies or substantial portions of the Software. - -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -rtutil.go - -// MIT License - -// Copyright (c) 2019 Ewan Chou - -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: - -// The above copyright notice and this permission notice shall be included in all -// copies or substantial portions of the Software. - -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -// SOFTWARE. - -Modifications: - -/* - * Copyright 2019 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - diff --git a/vendor/github.com/dgraph-io/ristretto/z/README.md b/vendor/github.com/dgraph-io/ristretto/z/README.md deleted file mode 100644 index 6d77e14..0000000 --- a/vendor/github.com/dgraph-io/ristretto/z/README.md +++ /dev/null @@ -1,129 +0,0 @@ -## bbloom: a bitset Bloom filter for go/golang -=== - -package implements a fast bloom filter with real 'bitset' and JSONMarshal/JSONUnmarshal to store/reload the Bloom filter. - -NOTE: the package uses unsafe.Pointer to set and read the bits from the bitset. If you're uncomfortable with using the unsafe package, please consider using my bloom filter package at github.com/AndreasBriese/bloom - -=== - -changelog 11/2015: new thread safe methods AddTS(), HasTS(), AddIfNotHasTS() following a suggestion from Srdjan Marinovic (github @a-little-srdjan), who used this to code a bloomfilter cache. - -This bloom filter was developed to strengthen a website-log database and was tested and optimized for this log-entry mask: "2014/%02i/%02i %02i:%02i:%02i /info.html". -Nonetheless bbloom should work with any other form of entries. - -~~Hash function is a modified Berkeley DB sdbm hash (to optimize for smaller strings). sdbm http://www.cse.yorku.ca/~oz/hash.html~~ - -Found sipHash (SipHash-2-4, a fast short-input PRF created by Jean-Philippe Aumasson and Daniel J. Bernstein.) to be about as fast. sipHash had been ported by Dimtry Chestnyk to Go (github.com/dchest/siphash ) - -Minimum hashset size is: 512 ([4]uint64; will be set automatically). - -###install - -```sh -go get github.com/AndreasBriese/bbloom -``` - -###test -+ change to folder ../bbloom -+ create wordlist in file "words.txt" (you might use `python permut.py`) -+ run 'go test -bench=.' within the folder - -```go -go test -bench=. -``` - -~~If you've installed the GOCONVEY TDD-framework http://goconvey.co/ you can run the tests automatically.~~ - -using go's testing framework now (have in mind that the op timing is related to 65536 operations of Add, Has, AddIfNotHas respectively) - -### usage - -after installation add - -```go -import ( - ... - "github.com/AndreasBriese/bbloom" - ... - ) -``` - -at your header. 
In the program use - -```go -// create a bloom filter for 65536 items and 1 % wrong-positive ratio -bf := bbloom.New(float64(1<<16), float64(0.01)) - -// or -// create a bloom filter with 650000 for 65536 items and 7 locs per hash explicitly -// bf = bbloom.New(float64(650000), float64(7)) -// or -bf = bbloom.New(650000.0, 7.0) - -// add one item -bf.Add([]byte("butter")) - -// Number of elements added is exposed now -// Note: ElemNum will not be included in JSON export (for compatability to older version) -nOfElementsInFilter := bf.ElemNum - -// check if item is in the filter -isIn := bf.Has([]byte("butter")) // should be true -isNotIn := bf.Has([]byte("Butter")) // should be false - -// 'add only if item is new' to the bloomfilter -added := bf.AddIfNotHas([]byte("butter")) // should be false because 'butter' is already in the set -added = bf.AddIfNotHas([]byte("buTTer")) // should be true because 'buTTer' is new - -// thread safe versions for concurrent use: AddTS, HasTS, AddIfNotHasTS -// add one item -bf.AddTS([]byte("peanutbutter")) -// check if item is in the filter -isIn = bf.HasTS([]byte("peanutbutter")) // should be true -isNotIn = bf.HasTS([]byte("peanutButter")) // should be false -// 'add only if item is new' to the bloomfilter -added = bf.AddIfNotHasTS([]byte("butter")) // should be false because 'peanutbutter' is already in the set -added = bf.AddIfNotHasTS([]byte("peanutbuTTer")) // should be true because 'penutbuTTer' is new - -// convert to JSON ([]byte) -Json := bf.JSONMarshal() - -// bloomfilters Mutex is exposed for external un-/locking -// i.e. mutex lock while doing JSON conversion -bf.Mtx.Lock() -Json = bf.JSONMarshal() -bf.Mtx.Unlock() - -// restore a bloom filter from storage -bfNew := bbloom.JSONUnmarshal(Json) - -isInNew := bfNew.Has([]byte("butter")) // should be true -isNotInNew := bfNew.Has([]byte("Butter")) // should be false - -``` - -to work with the bloom filter. - -### why 'fast'? - -It's about 3 times faster than William Fitzgeralds bitset bloom filter https://github.com/willf/bloom . And it is about so fast as my []bool set variant for Boom filters (see https://github.com/AndreasBriese/bloom ) but having a 8times smaller memory footprint: - - - Bloom filter (filter size 524288, 7 hashlocs) - github.com/AndreasBriese/bbloom 'Add' 65536 items (10 repetitions): 6595800 ns (100 ns/op) - github.com/AndreasBriese/bbloom 'Has' 65536 items (10 repetitions): 5986600 ns (91 ns/op) - github.com/AndreasBriese/bloom 'Add' 65536 items (10 repetitions): 6304684 ns (96 ns/op) - github.com/AndreasBriese/bloom 'Has' 65536 items (10 repetitions): 6568663 ns (100 ns/op) - - github.com/willf/bloom 'Add' 65536 items (10 repetitions): 24367224 ns (371 ns/op) - github.com/willf/bloom 'Test' 65536 items (10 repetitions): 21881142 ns (333 ns/op) - github.com/dataence/bloom/standard 'Add' 65536 items (10 repetitions): 23041644 ns (351 ns/op) - github.com/dataence/bloom/standard 'Check' 65536 items (10 repetitions): 19153133 ns (292 ns/op) - github.com/cabello/bloom 'Add' 65536 items (10 repetitions): 131921507 ns (2012 ns/op) - github.com/cabello/bloom 'Contains' 65536 items (10 repetitions): 131108962 ns (2000 ns/op) - -(on MBPro15 OSX10.8.5 i7 4Core 2.4Ghz) - - -With 32bit bloom filters (bloom32) using modified sdbm, bloom32 does hashing with only 2 bit shifts, one xor and one substraction per byte. smdb is about as fast as fnv64a but gives less collisions with the dataset (see mask above). 
bloom.New(float64(10 * 1<<16),float64(7)) populated with 1<<16 random items from the dataset (see above) and tested against the rest results in less than 0.05% collisions. diff --git a/vendor/github.com/dgraph-io/ristretto/z/allocator.go b/vendor/github.com/dgraph-io/ristretto/z/allocator.go deleted file mode 100644 index eae0f83..0000000 --- a/vendor/github.com/dgraph-io/ristretto/z/allocator.go +++ /dev/null @@ -1,403 +0,0 @@ -/* - * Copyright 2020 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package z - -import ( - "bytes" - "fmt" - "math" - "math/bits" - "math/rand" - "strings" - "sync" - "sync/atomic" - "time" - "unsafe" - - "github.com/dustin/go-humanize" -) - -// Allocator amortizes the cost of small allocations by allocating memory in -// bigger chunks. Internally it uses z.Calloc to allocate memory. Once -// allocated, the memory is not moved, so it is safe to use the allocated bytes -// to unsafe cast them to Go struct pointers. Maintaining a freelist is slow. -// Instead, Allocator only allocates memory, with the idea that finally we -// would just release the entire Allocator. -type Allocator struct { - sync.Mutex - compIdx uint64 // Stores bufIdx in 32 MSBs and posIdx in 32 LSBs. - buffers [][]byte - Ref uint64 - Tag string -} - -// allocs keeps references to all Allocators, so we can safely discard them later. -var allocsMu *sync.Mutex -var allocRef uint64 -var allocs map[uint64]*Allocator -var calculatedLog2 []int - -func init() { - allocsMu = new(sync.Mutex) - allocs = make(map[uint64]*Allocator) - - // Set up a unique Ref per process. - rand.Seed(time.Now().UnixNano()) - allocRef = uint64(rand.Int63n(1<<16)) << 48 //nolint:gosec // cryptographic precision not needed - - calculatedLog2 = make([]int, 1025) - for i := 1; i <= 1024; i++ { - calculatedLog2[i] = int(math.Log2(float64(i))) - } -} - -// NewAllocator creates an allocator starting with the given size. -func NewAllocator(sz int, tag string) *Allocator { - ref := atomic.AddUint64(&allocRef, 1) - // We should not allow a zero sized page because addBufferWithMinSize - // will run into an infinite loop trying to double the pagesize. - if sz < 512 { - sz = 512 - } - a := &Allocator{ - Ref: ref, - buffers: make([][]byte, 64), - Tag: tag, - } - l2 := uint64(log2(sz)) - if bits.OnesCount64(uint64(sz)) > 1 { - l2 += 1 - } - a.buffers[0] = Calloc(1<> 32), int(pos & 0xFFFFFFFF) -} - -// Size returns the size of the allocations so far. 
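
The `Allocator` above is an arena: it grabs large buffers, hands out sub-slices by bumping an offset, and frees everything at once on `Release` instead of tracking individual allocations. A very reduced, single-threaded sketch of the bump-pointer part, assuming none of the vendored details (the real code uses atomics, never moves memory, and doubles buffer sizes in place):

```go
package main

import "fmt"

// arena hands out slices from one big buffer by advancing an offset;
// nothing is freed individually, the whole buffer is dropped at once.
type arena struct {
	buf []byte
	off int
}

func newArena(size int) *arena { return &arena{buf: make([]byte, size)} }

// allocate returns the next n bytes, growing the buffer if it runs out.
// Note: unlike the vendored allocator, a grow copies the buffer, so slices
// handed out earlier keep pointing at the old backing array.
func (a *arena) allocate(n int) []byte {
	if a.off+n > len(a.buf) {
		bigger := make([]byte, 2*(a.off+n))
		copy(bigger, a.buf[:a.off])
		a.buf = bigger
	}
	out := a.buf[a.off : a.off+n]
	a.off += n
	return out
}

// release forgets everything allocated so far in O(1).
func (a *arena) release() { a.off = 0 }

func main() {
	a := newArena(16)
	x := a.allocate(8)
	y := a.allocate(8)
	copy(x, "01234567")
	copy(y, "89abcdef")
	fmt.Println(string(x), string(y), a.off) // 01234567 89abcdef 16
	a.release()
	fmt.Println(a.off) // 0
}
```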
-func (a *Allocator) Size() int { - pos := atomic.LoadUint64(&a.compIdx) - bi, pi := parse(pos) - var sz int - for i, b := range a.buffers { - if i < bi { - sz += len(b) - continue - } - sz += pi - return sz - } - panic("Size should not reach here") -} - -func log2(sz int) int { - if sz < len(calculatedLog2) { - return calculatedLog2[sz] - } - pow := 10 - sz >>= 10 - for sz > 1 { - sz >>= 1 - pow++ - } - return pow -} - -func (a *Allocator) Allocated() uint64 { - var alloc int - for _, b := range a.buffers { - alloc += cap(b) - } - return uint64(alloc) -} - -func (a *Allocator) TrimTo(max int) { - var alloc int - for i, b := range a.buffers { - if len(b) == 0 { - break - } - alloc += len(b) - if alloc < max { - continue - } - Free(b) - a.buffers[i] = nil - } -} - -// Release would release the memory back. Remember to make this call to avoid memory leaks. -func (a *Allocator) Release() { - if a == nil { - return - } - - var alloc int - for _, b := range a.buffers { - if len(b) == 0 { - break - } - alloc += len(b) - Free(b) - } - - allocsMu.Lock() - delete(allocs, a.Ref) - allocsMu.Unlock() -} - -const maxAlloc = 1 << 30 - -func (a *Allocator) MaxAlloc() int { - return maxAlloc -} - -const nodeAlign = unsafe.Sizeof(uint64(0)) - 1 - -func (a *Allocator) AllocateAligned(sz int) []byte { - tsz := sz + int(nodeAlign) - out := a.Allocate(tsz) - // We are reusing allocators. In that case, it's important to zero out the memory allocated - // here. We don't always zero it out (in Allocate), because other functions would be immediately - // overwriting the allocated slices anyway (see Copy). - ZeroOut(out, 0, len(out)) - - addr := uintptr(unsafe.Pointer(&out[0])) - aligned := (addr + nodeAlign) & ^nodeAlign - start := int(aligned - addr) - - return out[start : start+sz] -} - -func (a *Allocator) Copy(buf []byte) []byte { - if a == nil { - return append([]byte{}, buf...) - } - out := a.Allocate(len(buf)) - copy(out, buf) - return out -} - -func (a *Allocator) addBufferAt(bufIdx, minSz int) { - for { - if bufIdx >= len(a.buffers) { - panic(fmt.Sprintf("Allocator can not allocate more than %d buffers", len(a.buffers))) - } - if len(a.buffers[bufIdx]) == 0 { - break - } - if minSz <= len(a.buffers[bufIdx]) { - // No need to do anything. We already have a buffer which can satisfy minSz. - return - } - bufIdx++ - } - assert(bufIdx > 0) - // We need to allocate a new buffer. - // Make pageSize double of the last allocation. - pageSize := 2 * len(a.buffers[bufIdx-1]) - // Ensure pageSize is bigger than sz. - for pageSize < minSz { - pageSize *= 2 - } - // If bigger than maxAlloc, trim to maxAlloc. - if pageSize > maxAlloc { - pageSize = maxAlloc - } - - buf := Calloc(pageSize, a.Tag) - assert(len(a.buffers[bufIdx]) == 0) - a.buffers[bufIdx] = buf -} - -func (a *Allocator) Allocate(sz int) []byte { - if a == nil { - return make([]byte, sz) - } - if sz > maxAlloc { - panic(fmt.Sprintf("Unable to allocate more than %d\n", maxAlloc)) - } - if sz == 0 { - return nil - } - for { - pos := atomic.AddUint64(&a.compIdx, uint64(sz)) - bufIdx, posIdx := parse(pos) - buf := a.buffers[bufIdx] - if posIdx > len(buf) { - a.Lock() - newPos := atomic.LoadUint64(&a.compIdx) - newBufIdx, _ := parse(newPos) - if newBufIdx != bufIdx { - a.Unlock() - continue - } - a.addBufferAt(bufIdx+1, sz) - atomic.StoreUint64(&a.compIdx, uint64((bufIdx+1)<<32)) - a.Unlock() - // We added a new buffer. Let's acquire slice the right way by going back to the top. 
- continue - } - data := buf[posIdx-sz : posIdx] - return data - } -} - -type AllocatorPool struct { - numGets int64 - allocCh chan *Allocator - closer *Closer -} - -func NewAllocatorPool(sz int) *AllocatorPool { - a := &AllocatorPool{ - allocCh: make(chan *Allocator, sz), - closer: NewCloser(1), - } - go a.freeupAllocators() - return a -} - -func (p *AllocatorPool) Get(sz int, tag string) *Allocator { - if p == nil { - return NewAllocator(sz, tag) - } - atomic.AddInt64(&p.numGets, 1) - select { - case alloc := <-p.allocCh: - alloc.Reset() - alloc.Tag = tag - return alloc - default: - return NewAllocator(sz, tag) - } -} -func (p *AllocatorPool) Return(a *Allocator) { - if a == nil { - return - } - if p == nil { - a.Release() - return - } - a.TrimTo(400 << 20) - - select { - case p.allocCh <- a: - return - default: - a.Release() - } -} - -func (p *AllocatorPool) Release() { - if p == nil { - return - } - p.closer.SignalAndWait() -} - -func (p *AllocatorPool) freeupAllocators() { - defer p.closer.Done() - - ticker := time.NewTicker(2 * time.Second) - defer ticker.Stop() - - releaseOne := func() bool { - select { - case alloc := <-p.allocCh: - alloc.Release() - return true - default: - return false - } - } - - var last int64 - for { - select { - case <-p.closer.HasBeenClosed(): - close(p.allocCh) - for alloc := range p.allocCh { - alloc.Release() - } - return - - case <-ticker.C: - gets := atomic.LoadInt64(&p.numGets) - if gets != last { - // Some retrievals were made since the last time. So, let's avoid doing a release. - last = gets - continue - } - releaseOne() - } - } -} diff --git a/vendor/github.com/dgraph-io/ristretto/z/bbloom.go b/vendor/github.com/dgraph-io/ristretto/z/bbloom.go deleted file mode 100644 index 37135b0..0000000 --- a/vendor/github.com/dgraph-io/ristretto/z/bbloom.go +++ /dev/null @@ -1,211 +0,0 @@ -// The MIT License (MIT) -// Copyright (c) 2014 Andreas Briese, eduToolbox@Bri-C GmbH, Sarstedt - -// Permission is hereby granted, free of charge, to any person obtaining a copy of -// this software and associated documentation files (the "Software"), to deal in -// the Software without restriction, including without limitation the rights to -// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software is furnished to do so, -// subject to the following conditions: - -// The above copyright notice and this permission notice shall be included in all -// copies or substantial portions of the Software. - -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
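
`AllocatorPool` above is a simple reuse pattern: `Get` tries a buffered channel first and falls back to allocating a fresh object, `Return` tries to park the object and releases it when the channel is full, and a background ticker frees pooled objects that go unused. Here is that channel-as-pool pattern in isolation (a `bufPool` of byte slices, purely illustrative and not part of any library):

```go
package main

import "fmt"

// bufPool reuses byte slices through a buffered channel: no locks, and both
// Get and Put fall back gracefully when the channel is empty or full.
type bufPool struct {
	ch   chan []byte
	size int
}

func newBufPool(capacity, size int) *bufPool {
	return &bufPool{ch: make(chan []byte, capacity), size: size}
}

func (p *bufPool) Get() []byte {
	select {
	case b := <-p.ch:
		return b[:0] // reuse a parked buffer
	default:
		return make([]byte, 0, p.size) // pool empty: allocate
	}
}

func (p *bufPool) Put(b []byte) {
	select {
	case p.ch <- b:
	default: // pool full: drop the buffer and let GC reclaim it
	}
}

func main() {
	pool := newBufPool(2, 1024)
	b := pool.Get()
	b = append(b, "hello"...)
	pool.Put(b)
	c := pool.Get()
	fmt.Println(cap(c) >= 1024, len(c)) // true 0 (backing array reused)
}
```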
- -package z - -import ( - "bytes" - "encoding/json" - "math" - "unsafe" - - "github.com/golang/glog" -) - -// helper -var mask = []uint8{1, 2, 4, 8, 16, 32, 64, 128} - -func getSize(ui64 uint64) (size uint64, exponent uint64) { - if ui64 < uint64(512) { - ui64 = uint64(512) - } - size = uint64(1) - for size < ui64 { - size <<= 1 - exponent++ - } - return size, exponent -} - -func calcSizeByWrongPositives(numEntries, wrongs float64) (uint64, uint64) { - size := -1 * numEntries * math.Log(wrongs) / math.Pow(float64(0.69314718056), 2) - locs := math.Ceil(float64(0.69314718056) * size / numEntries) - return uint64(size), uint64(locs) -} - -// NewBloomFilter returns a new bloomfilter. -func NewBloomFilter(params ...float64) (bloomfilter *Bloom) { - var entries, locs uint64 - if len(params) == 2 { - if params[1] < 1 { - entries, locs = calcSizeByWrongPositives(params[0], params[1]) - } else { - entries, locs = uint64(params[0]), uint64(params[1]) - } - } else { - glog.Fatal("usage: New(float64(number_of_entries), float64(number_of_hashlocations))" + - " i.e. New(float64(1000), float64(3)) or New(float64(number_of_entries)," + - " float64(number_of_hashlocations)) i.e. New(float64(1000), float64(0.03))") - } - size, exponent := getSize(entries) - bloomfilter = &Bloom{ - sizeExp: exponent, - size: size - 1, - setLocs: locs, - shift: 64 - exponent, - } - bloomfilter.Size(size) - return bloomfilter -} - -// Bloom filter -type Bloom struct { - bitset []uint64 - ElemNum uint64 - sizeExp uint64 - size uint64 - setLocs uint64 - shift uint64 -} - -// <--- http://www.cse.yorku.ca/~oz/hash.html -// modified Berkeley DB Hash (32bit) -// hash is casted to l, h = 16bit fragments -// func (bl Bloom) absdbm(b *[]byte) (l, h uint64) { -// hash := uint64(len(*b)) -// for _, c := range *b { -// hash = uint64(c) + (hash << 6) + (hash << bl.sizeExp) - hash -// } -// h = hash >> bl.shift -// l = hash << bl.shift >> bl.shift -// return l, h -// } - -// Add adds hash of a key to the bloomfilter. -func (bl *Bloom) Add(hash uint64) { - h := hash >> bl.shift - l := hash << bl.shift >> bl.shift - for i := uint64(0); i < bl.setLocs; i++ { - bl.Set((h + i*l) & bl.size) - bl.ElemNum++ - } -} - -// Has checks if bit(s) for entry hash is/are set, -// returns true if the hash was added to the Bloom Filter. -func (bl Bloom) Has(hash uint64) bool { - h := hash >> bl.shift - l := hash << bl.shift >> bl.shift - for i := uint64(0); i < bl.setLocs; i++ { - if !bl.IsSet((h + i*l) & bl.size) { - return false - } - } - return true -} - -// AddIfNotHas only Adds hash, if it's not present in the bloomfilter. -// Returns true if hash was added. -// Returns false if hash was already registered in the bloomfilter. -func (bl *Bloom) AddIfNotHas(hash uint64) bool { - if bl.Has(hash) { - return false - } - bl.Add(hash) - return true -} - -// TotalSize returns the total size of the bloom filter. -func (bl *Bloom) TotalSize() int { - // The bl struct has 5 members and each one is 8 byte. The bitset is a - // uint64 byte slice. - return len(bl.bitset)*8 + 5*8 -} - -// Size makes Bloom filter with as bitset of size sz. -func (bl *Bloom) Size(sz uint64) { - bl.bitset = make([]uint64, sz>>6) -} - -// Clear resets the Bloom filter. -func (bl *Bloom) Clear() { - for i := range bl.bitset { - bl.bitset[i] = 0 - } -} - -// Set sets the bit[idx] of bitset. 
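
The removed `bbloom.go` derives its probe positions from a single 64-bit hash by splitting it (roughly) into a high part `h` and a low part `l` and probing `(h + i*l) & size` for each of the `k` hash locations, with the filter size rounded up to a power of two so the mask works. A compact sketch of that double-hashing scheme over a plain `[]uint64` bitset, with my own type and helper names:

```go
package main

import (
	"fmt"
	"hash/fnv"
)

// miniBloom is a power-of-two sized bitset probed with double hashing,
// echoing the (h + i*l) & mask trick in the vendored bbloom code.
type miniBloom struct {
	bits []uint64
	mask uint64 // size-1, where size is a power of two
	k    uint64 // probes per key
}

func newMiniBloom(sizePow2, k uint64) *miniBloom {
	return &miniBloom{bits: make([]uint64, sizePow2/64), mask: sizePow2 - 1, k: k}
}

func (b *miniBloom) set(i uint64) { b.bits[i>>6] |= 1 << (i & 63) }

func (b *miniBloom) isSet(i uint64) bool { return b.bits[i>>6]&(1<<(i&63)) != 0 }

func (b *miniBloom) Add(hash uint64) {
	h, l := hash>>32, hash&0xFFFFFFFF
	for i := uint64(0); i < b.k; i++ {
		b.set((h + i*l) & b.mask)
	}
}

func (b *miniBloom) Has(hash uint64) bool {
	h, l := hash>>32, hash&0xFFFFFFFF
	for i := uint64(0); i < b.k; i++ {
		if !b.isSet((h + i*l) & b.mask) {
			return false
		}
	}
	return true
}

func hash64(s string) uint64 { h := fnv.New64a(); h.Write([]byte(s)); return h.Sum64() }

func main() {
	bf := newMiniBloom(1<<16, 7)
	bf.Add(hash64("butter"))
	fmt.Println(bf.Has(hash64("butter")), bf.Has(hash64("Butter"))) // true, almost certainly false
}
```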
-func (bl *Bloom) Set(idx uint64) { - ptr := unsafe.Pointer(uintptr(unsafe.Pointer(&bl.bitset[idx>>6])) + uintptr((idx%64)>>3)) - *(*uint8)(ptr) |= mask[idx%8] -} - -// IsSet checks if bit[idx] of bitset is set, returns true/false. -func (bl *Bloom) IsSet(idx uint64) bool { - ptr := unsafe.Pointer(uintptr(unsafe.Pointer(&bl.bitset[idx>>6])) + uintptr((idx%64)>>3)) - r := ((*(*uint8)(ptr)) >> (idx % 8)) & 1 - return r == 1 -} - -// bloomJSONImExport -// Im/Export structure used by JSONMarshal / JSONUnmarshal -type bloomJSONImExport struct { - FilterSet []byte - SetLocs uint64 -} - -// NewWithBoolset takes a []byte slice and number of locs per entry, -// returns the bloomfilter with a bitset populated according to the input []byte. -func newWithBoolset(bs *[]byte, locs uint64) *Bloom { - bloomfilter := NewBloomFilter(float64(len(*bs)<<3), float64(locs)) - for i, b := range *bs { - *(*uint8)(unsafe.Pointer(uintptr(unsafe.Pointer(&bloomfilter.bitset[0])) + uintptr(i))) = b - } - return bloomfilter -} - -// JSONUnmarshal takes JSON-Object (type bloomJSONImExport) as []bytes -// returns bloom32 / bloom64 object. -func JSONUnmarshal(dbData []byte) (*Bloom, error) { - bloomImEx := bloomJSONImExport{} - if err := json.Unmarshal(dbData, &bloomImEx); err != nil { - return nil, err - } - buf := bytes.NewBuffer(bloomImEx.FilterSet) - bs := buf.Bytes() - bf := newWithBoolset(&bs, bloomImEx.SetLocs) - return bf, nil -} - -// JSONMarshal returns JSON-object (type bloomJSONImExport) as []byte. -func (bl Bloom) JSONMarshal() []byte { - bloomImEx := bloomJSONImExport{} - bloomImEx.SetLocs = bl.setLocs - bloomImEx.FilterSet = make([]byte, len(bl.bitset)<<3) - for i := range bloomImEx.FilterSet { - bloomImEx.FilterSet[i] = *(*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(&bl.bitset[0])) + - uintptr(i))) - } - data, err := json.Marshal(bloomImEx) - if err != nil { - glog.Fatal("json.Marshal failed: ", err) - } - return data -} diff --git a/vendor/github.com/dgraph-io/ristretto/z/btree.go b/vendor/github.com/dgraph-io/ristretto/z/btree.go deleted file mode 100644 index 12b735b..0000000 --- a/vendor/github.com/dgraph-io/ristretto/z/btree.go +++ /dev/null @@ -1,710 +0,0 @@ -/* - * Copyright 2020 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package z - -import ( - "fmt" - "math" - "os" - "reflect" - "strings" - "unsafe" - - "github.com/dgraph-io/ristretto/z/simd" -) - -var ( - pageSize = os.Getpagesize() - maxKeys = (pageSize / 16) - 1 - oneThird = int(float64(maxKeys) / 3) -) - -const ( - absoluteMax = uint64(math.MaxUint64 - 1) - minSize = 1 << 20 -) - -// Tree represents the structure for custom mmaped B+ tree. -// It supports keys in range [1, math.MaxUint64-1] and values [1, math.Uint64]. -type Tree struct { - buffer *Buffer - data []byte - nextPage uint64 - freePage uint64 - stats TreeStats -} - -func (t *Tree) initRootNode() { - // This is the root node. - t.newNode(0) - // This acts as the rightmost pointer (all the keys are <= this key). 
- t.Set(absoluteMax, 0) -} - -// NewTree returns an in-memory B+ tree. -func NewTree(tag string) *Tree { - const defaultTag = "tree" - if tag == "" { - tag = defaultTag - } - t := &Tree{buffer: NewBuffer(minSize, tag)} - t.Reset() - return t -} - -// NewTree returns a persistent on-disk B+ tree. -func NewTreePersistent(path string) (*Tree, error) { - t := &Tree{} - var err error - - // Open the buffer from disk and set it to the maximum allocated size. - t.buffer, err = NewBufferPersistent(path, minSize) - if err != nil { - return nil, err - } - t.buffer.offset = uint64(len(t.buffer.buf)) - t.data = t.buffer.Bytes() - - // pageID can never be 0 if the tree has been initialized. - root := t.node(1) - isInitialized := root.pageID() != 0 - - if !isInitialized { - t.nextPage = 1 - t.freePage = 0 - t.initRootNode() - } else { - t.reinit() - } - - return t, nil -} - -// reinit sets the internal variables of a Tree, which are normally stored -// in-memory, but are lost when loading from disk. -func (t *Tree) reinit() { - // Calculate t.nextPage by finding the first node whose pageID is not set. - t.nextPage = 1 - for int(t.nextPage)*pageSize < len(t.data) { - n := t.node(t.nextPage) - if n.pageID() == 0 { - break - } - t.nextPage++ - } - maxPageId := t.nextPage - 1 - - // Calculate t.freePage by finding the page to which no other page points. - // This would be the head of the page linked list. - // tailPages[i] is true if pageId i+1 is not the head of the list. - tailPages := make([]bool, maxPageId) - // Mark all pages containing nodes as tail pages. - t.Iterate(func(n node) { - i := n.pageID() - 1 - tailPages[i] = true - // If this is a leaf node, increment the stats. - if n.isLeaf() { - t.stats.NumLeafKeys += n.numKeys() - } - }) - // pointedPages is a list of page IDs that the tail pages point to. - pointedPages := make([]uint64, 0) - for i, isTail := range tailPages { - if !isTail { - pageId := uint64(i) + 1 - // Skip if nextPageId = 0, as that is equivalent to null page. - if nextPageId := t.node(pageId).uint64(0); nextPageId != 0 { - pointedPages = append(pointedPages, nextPageId) - } - t.stats.NumPagesFree++ - } - } - - // Mark all pages being pointed to as tail pages. - for _, pageId := range pointedPages { - i := pageId - 1 - tailPages[i] = true - } - // There should only be one head page left. - for i, isTail := range tailPages { - if !isTail { - pageId := uint64(i) + 1 - t.freePage = pageId - break - } - } -} - -// Reset resets the tree and truncates it to maxSz. -func (t *Tree) Reset() { - // Tree relies on uninitialized data being zeroed out, so we need to Memclr - // the data before using it again. - Memclr(t.buffer.buf) - t.buffer.Reset() - t.buffer.AllocateOffset(minSize) - t.data = t.buffer.Bytes() - t.stats = TreeStats{} - t.nextPage = 1 - t.freePage = 0 - t.initRootNode() -} - -// Close releases the memory used by the tree. -func (t *Tree) Close() error { - if t == nil { - return nil - } - return t.buffer.Release() -} - -type TreeStats struct { - Allocated int // Derived. - Bytes int // Derived. - NumLeafKeys int // Calculated. - NumPages int // Derived. - NumPagesFree int // Calculated. - Occupancy float64 // Derived. - PageSize int // Derived. -} - -// Stats returns stats about the tree. 
-func (t *Tree) Stats() TreeStats { - numPages := int(t.nextPage - 1) - out := TreeStats{ - Bytes: numPages * pageSize, - Allocated: len(t.data), - NumLeafKeys: t.stats.NumLeafKeys, - NumPages: numPages, - NumPagesFree: t.stats.NumPagesFree, - PageSize: pageSize, - } - out.Occupancy = 100.0 * float64(out.NumLeafKeys) / float64(maxKeys*numPages) - return out -} - -// BytesToUint64Slice converts a byte slice to a uint64 slice. -func BytesToUint64Slice(b []byte) []uint64 { - if len(b) == 0 { - return nil - } - var u64s []uint64 - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&u64s)) - hdr.Len = len(b) / 8 - hdr.Cap = hdr.Len - hdr.Data = uintptr(unsafe.Pointer(&b[0])) - return u64s -} - -func (t *Tree) newNode(bit uint64) node { - var pageId uint64 - if t.freePage > 0 { - pageId = t.freePage - t.stats.NumPagesFree-- - } else { - pageId = t.nextPage - t.nextPage++ - offset := int(pageId) * pageSize - reqSize := offset + pageSize - if reqSize > len(t.data) { - t.buffer.AllocateOffset(reqSize - len(t.data)) - t.data = t.buffer.Bytes() - } - } - n := t.node(pageId) - if t.freePage > 0 { - t.freePage = n.uint64(0) - } - zeroOut(n) - n.setBit(bit) - n.setAt(keyOffset(maxKeys), pageId) - return n -} - -func getNode(data []byte) node { - return node(BytesToUint64Slice(data)) -} - -func zeroOut(data []uint64) { - for i := 0; i < len(data); i++ { - data[i] = 0 - } -} - -func (t *Tree) node(pid uint64) node { - // page does not exist - if pid == 0 { - return nil - } - start := pageSize * int(pid) - return getNode(t.data[start : start+pageSize]) -} - -// Set sets the key-value pair in the tree. -func (t *Tree) Set(k, v uint64) { - if k == math.MaxUint64 || k == 0 { - panic("Error setting zero or MaxUint64") - } - root := t.set(1, k, v) - if root.isFull() { - right := t.split(1) - left := t.newNode(root.bits()) - // Re-read the root as the underlying buffer for tree might have changed during split. - root = t.node(1) - copy(left[:keyOffset(maxKeys)], root) - left.setNumKeys(root.numKeys()) - - // reset the root node. - zeroOut(root[:keyOffset(maxKeys)]) - root.setNumKeys(0) - - // set the pointers for left and right child in the root node. - root.set(left.maxKey(), left.pageID()) - root.set(right.maxKey(), right.pageID()) - } -} - -// For internal nodes, they contain . -// where all entries <= key are stored in the corresponding ptr. -func (t *Tree) set(pid, k, v uint64) node { - n := t.node(pid) - if n.isLeaf() { - t.stats.NumLeafKeys += n.set(k, v) - return n - } - - // This is an internal node. - idx := n.search(k) - if idx >= maxKeys { - panic("search returned index >= maxKeys") - } - // If no key at idx. - if n.key(idx) == 0 { - n.setAt(keyOffset(idx), k) - n.setNumKeys(n.numKeys() + 1) - } - child := t.node(n.val(idx)) - if child == nil { - child = t.newNode(bitLeaf) - n = t.node(pid) - n.setAt(valOffset(idx), child.pageID()) - } - child = t.set(child.pageID(), k, v) - // Re-read n as the underlying buffer for tree might have changed during set. - n = t.node(pid) - if child.isFull() { - // Just consider the left sibling for simplicity. - // if t.shareWithSibling(n, idx) { - // return n - // } - - nn := t.split(child.pageID()) - // Re-read n and child as the underlying buffer for tree might have changed during split. - n = t.node(pid) - child = t.node(n.uint64(valOffset(idx))) - // Set child pointers in the node n. - // Note that key for right node (nn) already exist in node n, but the - // pointer is updated. 
- n.set(child.maxKey(), child.pageID()) - n.set(nn.maxKey(), nn.pageID()) - } - return n -} - -// Get looks for key and returns the corresponding value. -// If key is not found, 0 is returned. -func (t *Tree) Get(k uint64) uint64 { - if k == math.MaxUint64 || k == 0 { - panic("Does not support getting MaxUint64/Zero") - } - root := t.node(1) - return t.get(root, k) -} - -func (t *Tree) get(n node, k uint64) uint64 { - if n.isLeaf() { - return n.get(k) - } - // This is internal node - idx := n.search(k) - if idx == n.numKeys() || n.key(idx) == 0 { - return 0 - } - child := t.node(n.uint64(valOffset(idx))) - assert(child != nil) - return t.get(child, k) -} - -// DeleteBelow deletes all keys with value under ts. -func (t *Tree) DeleteBelow(ts uint64) { - root := t.node(1) - t.stats.NumLeafKeys = 0 - t.compact(root, ts) - assert(root.numKeys() >= 1) -} - -func (t *Tree) compact(n node, ts uint64) int { - if n.isLeaf() { - numKeys := n.compact(ts) - t.stats.NumLeafKeys += n.numKeys() - return numKeys - } - // Not leaf. - N := n.numKeys() - for i := 0; i < N; i++ { - assert(n.key(i) > 0) - childID := n.uint64(valOffset(i)) - child := t.node(childID) - if rem := t.compact(child, ts); rem == 0 && i < N-1 { - // If no valid key is remaining we can drop this child. However, don't do that if this - // is the max key. - t.stats.NumLeafKeys -= child.numKeys() - child.setAt(0, t.freePage) - t.freePage = childID - n.setAt(valOffset(i), 0) - t.stats.NumPagesFree++ - } - } - // We use ts=1 here because we want to delete all the keys whose value is 0, which means they no - // longer have a valid page for that key. - return n.compact(1) -} - -func (t *Tree) iterate(n node, fn func(node)) { - fn(n) - if n.isLeaf() { - return - } - // Explore children. - for i := 0; i < maxKeys; i++ { - if n.key(i) == 0 { - return - } - childID := n.uint64(valOffset(i)) - assert(childID > 0) - - child := t.node(childID) - t.iterate(child, fn) - } -} - -// Iterate iterates over the tree and executes the fn on each node. -func (t *Tree) Iterate(fn func(node)) { - root := t.node(1) - t.iterate(root, fn) -} - -// IterateKV iterates through all keys and values in the tree. -// If newVal is non-zero, it will be set in the tree. -func (t *Tree) IterateKV(f func(key, val uint64) (newVal uint64)) { - t.Iterate(func(n node) { - // Only leaf nodes contain keys. - if !n.isLeaf() { - return - } - - for i := 0; i < n.numKeys(); i++ { - key := n.key(i) - val := n.val(i) - - // A zero value here means that this is a bogus entry. - if val == 0 { - continue - } - - newVal := f(key, val) - if newVal != 0 { - n.setAt(valOffset(i), newVal) - } - } - }) -} - -func (t *Tree) print(n node, parentID uint64) { - n.print(parentID) - if n.isLeaf() { - return - } - pid := n.pageID() - for i := 0; i < maxKeys; i++ { - if n.key(i) == 0 { - return - } - childID := n.uint64(valOffset(i)) - child := t.node(childID) - t.print(child, pid) - } -} - -// Print iterates over the tree and prints all valid KVs. -func (t *Tree) Print() { - root := t.node(1) - t.print(root, 0) -} - -// Splits the node into two. It moves right half of the keys from the original node to a newly -// created right node. It returns the right node. -func (t *Tree) split(pid uint64) node { - n := t.node(pid) - if !n.isFull() { - panic("This should be called only when n is full") - } - - // Create a new node nn, copy over half the keys from n, and set the parent to n's parent. - nn := t.newNode(n.bits()) - // Re-read n as the underlying buffer for tree might have changed during newNode. 
- n = t.node(pid) - rightHalf := n[keyOffset(maxKeys/2):keyOffset(maxKeys)] - copy(nn, rightHalf) - nn.setNumKeys(maxKeys - maxKeys/2) - - // Remove entries from node n. - zeroOut(rightHalf) - n.setNumKeys(maxKeys / 2) - return nn -} - -// shareWithSiblingXXX is unused for now. The idea is to move some keys to -// sibling when a node is full. But, I don't see any special benefits in our -// access pattern. It doesn't result in better occupancy ratios. -func (t *Tree) shareWithSiblingXXX(n node, idx int) bool { - if idx == 0 { - return false - } - left := t.node(n.val(idx - 1)) - ns := left.numKeys() - if ns >= maxKeys/2 { - // Sibling is already getting full. - return false - } - - right := t.node(n.val(idx)) - // Copy over keys from right child to left child. - copied := copy(left[keyOffset(ns):], right[:keyOffset(oneThird)]) - copied /= 2 // Considering that key-val constitute one key. - left.setNumKeys(ns + copied) - - // Update the max key in parent node n for the left sibling. - n.setAt(keyOffset(idx-1), left.maxKey()) - - // Now move keys to left for the right sibling. - until := copy(right, right[keyOffset(oneThird):keyOffset(maxKeys)]) - right.setNumKeys(until / 2) - zeroOut(right[until:keyOffset(maxKeys)]) - return true -} - -// Each node in the node is of size pageSize. Two kinds of nodes. Leaf nodes and internal nodes. -// Leaf nodes only contain the data. Internal nodes would contain the key and the offset to the -// child node. -// Internal node would have first entry as -// <0 offset to child>, <1000 offset>, <5000 offset>, and so on... -// Leaf nodes would just have: , , and so on... -// Last 16 bytes of the node are off limits. -// | pageID (8 bytes) | metaBits (1 byte) | 3 free bytes | numKeys (4 bytes) | -type node []uint64 - -func (n node) uint64(start int) uint64 { return n[start] } - -// func (n node) uint32(start int) uint32 { return *(*uint32)(unsafe.Pointer(&n[start])) } - -func keyOffset(i int) int { return 2 * i } -func valOffset(i int) int { return 2*i + 1 } -func (n node) numKeys() int { return int(n.uint64(valOffset(maxKeys)) & 0xFFFFFFFF) } -func (n node) pageID() uint64 { return n.uint64(keyOffset(maxKeys)) } -func (n node) key(i int) uint64 { return n.uint64(keyOffset(i)) } -func (n node) val(i int) uint64 { return n.uint64(valOffset(i)) } -func (n node) data(i int) []uint64 { return n[keyOffset(i):keyOffset(i+1)] } - -func (n node) setAt(start int, k uint64) { - n[start] = k -} - -func (n node) setNumKeys(num int) { - idx := valOffset(maxKeys) - val := n[idx] - val &= 0xFFFFFFFF00000000 - val |= uint64(num) - n[idx] = val -} - -func (n node) moveRight(lo int) { - hi := n.numKeys() - assert(hi != maxKeys) - // copy works despite of overlap in src and dst. - // See https://golang.org/pkg/builtin/#copy - copy(n[keyOffset(lo+1):keyOffset(hi+1)], n[keyOffset(lo):keyOffset(hi)]) -} - -const ( - bitLeaf = uint64(1 << 63) -) - -func (n node) setBit(b uint64) { - vo := valOffset(maxKeys) - val := n[vo] - val &= 0xFFFFFFFF - val |= b - n[vo] = val -} -func (n node) bits() uint64 { - return n.val(maxKeys) & 0xFF00000000000000 -} -func (n node) isLeaf() bool { - return n.bits()&bitLeaf > 0 -} - -// isFull checks that the node is already full. -func (n node) isFull() bool { - return n.numKeys() == maxKeys -} - -// Search returns the index of a smallest key >= k in a node. 
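As an aside, the exported Tree API deleted above (NewTree, Set, Get, DeleteBelow, Close) was typically driven as in the minimal sketch below. It is restricted to signatures visible in the removed code; the keys and values are arbitrary illustration data, not anything used by gocache itself.

```go
package main

import (
	"fmt"

	"github.com/dgraph-io/ristretto/z"
)

func main() {
	// NewTree allocates an in-memory B+ tree; the tag only labels allocator stats.
	t := z.NewTree("example")
	defer t.Close()

	// Keys and values are plain uint64s; key 0 and MaxUint64 are reserved and panic.
	t.Set(10, 100)
	t.Set(20, 200)

	fmt.Println(t.Get(10)) // 100
	fmt.Println(t.Get(30)) // 0: missing keys return 0

	// Drop every entry whose value is below the given threshold.
	t.DeleteBelow(150)
	fmt.Println(t.Get(20)) // 200 survives; key 10 (value 100) is gone
}
```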
-func (n node) search(k uint64) int { - N := n.numKeys() - if N < 4 { - for i := 0; i < N; i++ { - if ki := n.key(i); ki >= k { - return i - } - } - return N - } - return int(simd.Search(n[:2*N], k)) - // lo, hi := 0, N - // // Reduce the search space using binary seach and then do linear search. - // for hi-lo > 32 { - // mid := (hi + lo) / 2 - // km := n.key(mid) - // if k == km { - // return mid - // } - // if k > km { - // // key is greater than the key at mid, so move right. - // lo = mid + 1 - // } else { - // // else move left. - // hi = mid - // } - // } - // for i := lo; i <= hi; i++ { - // if ki := n.key(i); ki >= k { - // return i - // } - // } - // return N -} -func (n node) maxKey() uint64 { - idx := n.numKeys() - // idx points to the first key which is zero. - if idx > 0 { - idx-- - } - return n.key(idx) -} - -// compacts the node i.e., remove all the kvs with value < lo. It returns the remaining number of -// keys. -func (n node) compact(lo uint64) int { - N := n.numKeys() - mk := n.maxKey() - var left, right int - for right = 0; right < N; right++ { - if n.val(right) < lo && n.key(right) < mk { - // Skip over this key. Don't copy it. - continue - } - // Valid data. Copy it from right to left. Advance left. - if left != right { - copy(n.data(left), n.data(right)) - } - left++ - } - // zero out rest of the kv pairs. - zeroOut(n[keyOffset(left):keyOffset(right)]) - n.setNumKeys(left) - - // If the only key we have is the max key, and its value is less than lo, then we can indicate - // to the caller by returning a zero that it's OK to drop the node. - if left == 1 && n.key(0) == mk && n.val(0) < lo { - return 0 - } - return left -} - -func (n node) get(k uint64) uint64 { - idx := n.search(k) - // key is not found - if idx == n.numKeys() { - return 0 - } - if ki := n.key(idx); ki == k { - return n.val(idx) - } - return 0 -} - -// set returns true if it added a new key. -func (n node) set(k, v uint64) (numAdded int) { - idx := n.search(k) - ki := n.key(idx) - if n.numKeys() == maxKeys { - // This happens during split of non-root node, when we are updating the child pointer of - // right node. Hence, the key should already exist. - assert(ki == k) - } - if ki > k { - // Found the first entry which is greater than k. So, we need to fit k - // just before it. For that, we should move the rest of the data in the - // node to the right to make space for k. - n.moveRight(idx) - } - // If the k does not exist already, increment the number of keys. - if ki != k { - n.setNumKeys(n.numKeys() + 1) - numAdded = 1 - } - if ki == 0 || ki >= k { - n.setAt(keyOffset(idx), k) - n.setAt(valOffset(idx), v) - return - } - panic("shouldn't reach here") -} - -func (n node) iterate(fn func(node, int)) { - for i := 0; i < maxKeys; i++ { - if k := n.key(i); k > 0 { - fn(n, i) - } else { - break - } - } -} - -func (n node) print(parentID uint64) { - var keys []string - n.iterate(func(n node, i int) { - keys = append(keys, fmt.Sprintf("%d", n.key(i))) - }) - if len(keys) > 8 { - copy(keys[4:], keys[len(keys)-4:]) - keys[3] = "..." - keys = keys[:8] - } - fmt.Printf("%d Child of: %d num keys: %d keys: %s\n", - n.pageID(), parentID, n.numKeys(), strings.Join(keys, " ")) -} diff --git a/vendor/github.com/dgraph-io/ristretto/z/buffer.go b/vendor/github.com/dgraph-io/ristretto/z/buffer.go deleted file mode 100644 index 5a22de8..0000000 --- a/vendor/github.com/dgraph-io/ristretto/z/buffer.go +++ /dev/null @@ -1,544 +0,0 @@ -/* - * Copyright 2020 Dgraph Labs, Inc. 
and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package z - -import ( - "encoding/binary" - "fmt" - "io/ioutil" - "os" - "sort" - "sync/atomic" - - "github.com/golang/glog" - "github.com/pkg/errors" -) - -const ( - defaultCapacity = 64 - defaultTag = "buffer" -) - -// Buffer is equivalent of bytes.Buffer without the ability to read. It is NOT thread-safe. -// -// In UseCalloc mode, z.Calloc is used to allocate memory, which depending upon how the code is -// compiled could use jemalloc for allocations. -// -// In UseMmap mode, Buffer uses file mmap to allocate memory. This allows us to store big data -// structures without using physical memory. -// -// MaxSize can be set to limit the memory usage. -type Buffer struct { - padding uint64 // number of starting bytes used for padding - offset uint64 // used length of the buffer - buf []byte // backing slice for the buffer - bufType BufferType // type of the underlying buffer - curSz int // capacity of the buffer - maxSz int // causes a panic if the buffer grows beyond this size - mmapFile *MmapFile // optional mmap backing for the buffer - autoMmapAfter int // Calloc falls back to an mmaped tmpfile after crossing this size - autoMmapDir string // directory for autoMmap to create a tempfile in - persistent bool // when enabled, Release will not delete the underlying mmap file - tag string // used for jemalloc stats -} - -func NewBuffer(capacity int, tag string) *Buffer { - if capacity < defaultCapacity { - capacity = defaultCapacity - } - if tag == "" { - tag = defaultTag - } - return &Buffer{ - buf: Calloc(capacity, tag), - bufType: UseCalloc, - curSz: capacity, - offset: 8, - padding: 8, - tag: tag, - } -} - -// It is the caller's responsibility to set offset after this, because Buffer -// doesn't remember what it was. 
-func NewBufferPersistent(path string, capacity int) (*Buffer, error) { - file, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, 0666) - if err != nil { - return nil, err - } - buffer, err := newBufferFile(file, capacity) - if err != nil { - return nil, err - } - buffer.persistent = true - return buffer, nil -} - -func NewBufferTmp(dir string, capacity int) (*Buffer, error) { - if dir == "" { - dir = tmpDir - } - file, err := ioutil.TempFile(dir, "buffer") - if err != nil { - return nil, err - } - return newBufferFile(file, capacity) -} - -func newBufferFile(file *os.File, capacity int) (*Buffer, error) { - if capacity < defaultCapacity { - capacity = defaultCapacity - } - mmapFile, err := OpenMmapFileUsing(file, capacity, true) - if err != nil && err != NewFile { - return nil, err - } - buf := &Buffer{ - buf: mmapFile.Data, - bufType: UseMmap, - curSz: len(mmapFile.Data), - mmapFile: mmapFile, - offset: 8, - padding: 8, - } - return buf, nil -} - -func NewBufferSlice(slice []byte) *Buffer { - return &Buffer{ - offset: uint64(len(slice)), - buf: slice, - bufType: UseInvalid, - } -} - -func (b *Buffer) WithAutoMmap(threshold int, path string) *Buffer { - if b.bufType != UseCalloc { - panic("can only autoMmap with UseCalloc") - } - b.autoMmapAfter = threshold - if path == "" { - b.autoMmapDir = tmpDir - } else { - b.autoMmapDir = path - } - return b -} - -func (b *Buffer) WithMaxSize(size int) *Buffer { - b.maxSz = size - return b -} - -func (b *Buffer) IsEmpty() bool { - return int(b.offset) == b.StartOffset() -} - -// LenWithPadding would return the number of bytes written to the buffer so far -// plus the padding at the start of the buffer. -func (b *Buffer) LenWithPadding() int { - return int(atomic.LoadUint64(&b.offset)) -} - -// LenNoPadding would return the number of bytes written to the buffer so far -// (without the padding). -func (b *Buffer) LenNoPadding() int { - return int(atomic.LoadUint64(&b.offset) - b.padding) -} - -// Bytes would return all the written bytes as a slice. -func (b *Buffer) Bytes() []byte { - off := atomic.LoadUint64(&b.offset) - return b.buf[b.padding:off] -} - -// Grow would grow the buffer to have at least n more bytes. In case the buffer is at capacity, it -// would reallocate twice the size of current capacity + n, to ensure n bytes can be written to the -// buffer without further allocation. In UseMmap mode, this might result in underlying file -// expansion. -func (b *Buffer) Grow(n int) { - if b.buf == nil { - panic("z.Buffer needs to be initialized before using") - } - if b.maxSz > 0 && int(b.offset)+n > b.maxSz { - err := fmt.Errorf( - "z.Buffer max size exceeded: %d offset: %d grow: %d", b.maxSz, b.offset, n) - panic(err) - } - if int(b.offset)+n < b.curSz { - return - } - - // Calculate new capacity. - growBy := b.curSz + n - // Don't allocate more than 1GB at a time. - if growBy > 1<<30 { - growBy = 1 << 30 - } - // Allocate at least n, even if it exceeds the 1GB limit above. - if n > growBy { - growBy = n - } - b.curSz += growBy - - switch b.bufType { - case UseCalloc: - // If autoMmap gets triggered, copy the slice over to an mmaped file. 
- if b.autoMmapAfter > 0 && b.curSz > b.autoMmapAfter { - b.bufType = UseMmap - file, err := ioutil.TempFile(b.autoMmapDir, "") - if err != nil { - panic(err) - } - mmapFile, err := OpenMmapFileUsing(file, b.curSz, true) - if err != nil && err != NewFile { - panic(err) - } - assert(int(b.offset) == copy(mmapFile.Data, b.buf[:b.offset])) - Free(b.buf) - b.mmapFile = mmapFile - b.buf = mmapFile.Data - break - } - - // Else, reallocate the slice. - newBuf := Calloc(b.curSz, b.tag) - assert(int(b.offset) == copy(newBuf, b.buf[:b.offset])) - Free(b.buf) - b.buf = newBuf - - case UseMmap: - // Truncate and remap the underlying file. - if err := b.mmapFile.Truncate(int64(b.curSz)); err != nil { - err = errors.Wrapf(err, - "while trying to truncate file: %s to size: %d", b.mmapFile.Fd.Name(), b.curSz) - panic(err) - } - b.buf = b.mmapFile.Data - - default: - panic("can only use Grow on UseCalloc and UseMmap buffers") - } -} - -// Allocate is a way to get a slice of size n back from the buffer. This slice can be directly -// written to. Warning: Allocate is not thread-safe. The byte slice returned MUST be used before -// further calls to Buffer. -func (b *Buffer) Allocate(n int) []byte { - b.Grow(n) - off := b.offset - b.offset += uint64(n) - return b.buf[off:int(b.offset)] -} - -// AllocateOffset works the same way as allocate, but instead of returning a byte slice, it returns -// the offset of the allocation. -func (b *Buffer) AllocateOffset(n int) int { - b.Grow(n) - b.offset += uint64(n) - return int(b.offset) - n -} - -func (b *Buffer) writeLen(sz int) { - buf := b.Allocate(4) - binary.BigEndian.PutUint32(buf, uint32(sz)) -} - -// SliceAllocate would encode the size provided into the buffer, followed by a call to Allocate, -// hence returning the slice of size sz. This can be used to allocate a lot of small buffers into -// this big buffer. -// Note that SliceAllocate should NOT be mixed with normal calls to Write. -func (b *Buffer) SliceAllocate(sz int) []byte { - b.Grow(4 + sz) - b.writeLen(sz) - return b.Allocate(sz) -} - -func (b *Buffer) StartOffset() int { - return int(b.padding) -} - -func (b *Buffer) WriteSlice(slice []byte) { - dst := b.SliceAllocate(len(slice)) - assert(len(slice) == copy(dst, slice)) -} - -func (b *Buffer) SliceIterate(f func(slice []byte) error) error { - if b.IsEmpty() { - return nil - } - slice, next := []byte{}, b.StartOffset() - for next >= 0 { - slice, next = b.Slice(next) - if len(slice) == 0 { - continue - } - if err := f(slice); err != nil { - return err - } - } - return nil -} - -const ( - UseCalloc BufferType = iota - UseMmap - UseInvalid -) - -type BufferType int - -func (t BufferType) String() string { - switch t { - case UseCalloc: - return "UseCalloc" - case UseMmap: - return "UseMmap" - default: - return "UseInvalid" - } -} - -type LessFunc func(a, b []byte) bool -type sortHelper struct { - offsets []int - b *Buffer - tmp *Buffer - less LessFunc - small []int -} - -func (s *sortHelper) sortSmall(start, end int) { - s.tmp.Reset() - s.small = s.small[:0] - next := start - for next >= 0 && next < end { - s.small = append(s.small, next) - _, next = s.b.Slice(next) - } - - // We are sorting the slices pointed to by s.small offsets, but only moving the offsets around. - sort.Slice(s.small, func(i, j int) bool { - left, _ := s.b.Slice(s.small[i]) - right, _ := s.b.Slice(s.small[j]) - return s.less(left, right) - }) - // Now we iterate over the s.small offsets and copy over the slices. The result is now in order. 
- for _, off := range s.small { - s.tmp.Write(rawSlice(s.b.buf[off:])) - } - assert(end-start == copy(s.b.buf[start:end], s.tmp.Bytes())) -} - -func assert(b bool) { - if !b { - glog.Fatalf("%+v", errors.Errorf("Assertion failure")) - } -} -func check(err error) { - if err != nil { - glog.Fatalf("%+v", err) - } -} -func check2(_ interface{}, err error) { - check(err) -} - -func (s *sortHelper) merge(left, right []byte, start, end int) { - if len(left) == 0 || len(right) == 0 { - return - } - s.tmp.Reset() - check2(s.tmp.Write(left)) - left = s.tmp.Bytes() - - var ls, rs []byte - - copyLeft := func() { - assert(len(ls) == copy(s.b.buf[start:], ls)) - left = left[len(ls):] - start += len(ls) - } - copyRight := func() { - assert(len(rs) == copy(s.b.buf[start:], rs)) - right = right[len(rs):] - start += len(rs) - } - - for start < end { - if len(left) == 0 { - assert(len(right) == copy(s.b.buf[start:end], right)) - return - } - if len(right) == 0 { - assert(len(left) == copy(s.b.buf[start:end], left)) - return - } - ls = rawSlice(left) - rs = rawSlice(right) - - // We skip the first 4 bytes in the rawSlice, because that stores the length. - if s.less(ls[4:], rs[4:]) { - copyLeft() - } else { - copyRight() - } - } -} - -func (s *sortHelper) sort(lo, hi int) []byte { - assert(lo <= hi) - - mid := lo + (hi-lo)/2 - loff, hoff := s.offsets[lo], s.offsets[hi] - if lo == mid { - // No need to sort, just return the buffer. - return s.b.buf[loff:hoff] - } - - // lo, mid would sort from [offset[lo], offset[mid]) . - left := s.sort(lo, mid) - // Typically we'd use mid+1, but here mid represents an offset in the buffer. Each offset - // contains a thousand entries. So, if we do mid+1, we'd skip over those entries. - right := s.sort(mid, hi) - - s.merge(left, right, loff, hoff) - return s.b.buf[loff:hoff] -} - -// SortSlice is like SortSliceBetween but sorting over the entire buffer. -func (b *Buffer) SortSlice(less func(left, right []byte) bool) { - b.SortSliceBetween(b.StartOffset(), int(b.offset), less) -} -func (b *Buffer) SortSliceBetween(start, end int, less LessFunc) { - if start >= end { - return - } - if start == 0 { - panic("start can never be zero") - } - - var offsets []int - next, count := start, 0 - for next >= 0 && next < end { - if count%1024 == 0 { - offsets = append(offsets, next) - } - _, next = b.Slice(next) - count++ - } - assert(len(offsets) > 0) - if offsets[len(offsets)-1] != end { - offsets = append(offsets, end) - } - - szTmp := int(float64((end-start)/2) * 1.1) - s := &sortHelper{ - offsets: offsets, - b: b, - less: less, - small: make([]int, 0, 1024), - tmp: NewBuffer(szTmp, b.tag), - } - defer s.tmp.Release() - - left := offsets[0] - for _, off := range offsets[1:] { - s.sortSmall(left, off) - left = off - } - s.sort(0, len(offsets)-1) -} - -func rawSlice(buf []byte) []byte { - sz := binary.BigEndian.Uint32(buf) - return buf[:4+int(sz)] -} - -// Slice would return the slice written at offset. -func (b *Buffer) Slice(offset int) ([]byte, int) { - if offset >= int(b.offset) { - return nil, -1 - } - - sz := binary.BigEndian.Uint32(b.buf[offset:]) - start := offset + 4 - next := start + int(sz) - res := b.buf[start:next] - if next >= int(b.offset) { - next = -1 - } - return res, next -} - -// SliceOffsets is an expensive function. Use sparingly. 
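For context, the z.Buffer removed here is the write-only, manually managed byte buffer backing the B+ tree above. A minimal write-then-iterate sketch, limited to calls whose signatures appear in the deleted code (the capacity and payloads are arbitrary):

```go
package main

import (
	"fmt"

	"github.com/dgraph-io/ristretto/z"
)

func main() {
	// NewBuffer allocates through z.Calloc; the tag only labels allocator stats.
	buf := z.NewBuffer(1024, "example")
	// Without Release the manually managed memory is leaked.
	defer buf.Release()

	// WriteSlice length-prefixes each slice so it can be walked back later.
	buf.WriteSlice([]byte("hello"))
	buf.WriteSlice([]byte("world"))

	// SliceIterate visits every length-prefixed slice in write order.
	_ = buf.SliceIterate(func(s []byte) error {
		fmt.Println(string(s))
		return nil
	})
}
```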
-func (b *Buffer) SliceOffsets() []int { - next := b.StartOffset() - var offsets []int - for next >= 0 { - offsets = append(offsets, next) - _, next = b.Slice(next) - } - return offsets -} - -func (b *Buffer) Data(offset int) []byte { - if offset > b.curSz { - panic("offset beyond current size") - } - return b.buf[offset:b.curSz] -} - -// Write would write p bytes to the buffer. -func (b *Buffer) Write(p []byte) (n int, err error) { - n = len(p) - b.Grow(n) - assert(n == copy(b.buf[b.offset:], p)) - b.offset += uint64(n) - return n, nil -} - -// Reset would reset the buffer to be reused. -func (b *Buffer) Reset() { - b.offset = uint64(b.StartOffset()) -} - -// Release would free up the memory allocated by the buffer. Once the usage of buffer is done, it is -// important to call Release, otherwise a memory leak can happen. -func (b *Buffer) Release() error { - if b == nil { - return nil - } - switch b.bufType { - case UseCalloc: - Free(b.buf) - case UseMmap: - if b.mmapFile == nil { - return nil - } - path := b.mmapFile.Fd.Name() - if err := b.mmapFile.Close(-1); err != nil { - return errors.Wrapf(err, "while closing file: %s", path) - } - if !b.persistent { - if err := os.Remove(path); err != nil { - return errors.Wrapf(err, "while deleting file %s", path) - } - } - } - return nil -} diff --git a/vendor/github.com/dgraph-io/ristretto/z/calloc.go b/vendor/github.com/dgraph-io/ristretto/z/calloc.go deleted file mode 100644 index 2e5d613..0000000 --- a/vendor/github.com/dgraph-io/ristretto/z/calloc.go +++ /dev/null @@ -1,42 +0,0 @@ -package z - -import "sync/atomic" - -var numBytes int64 - -// NumAllocBytes returns the number of bytes allocated using calls to z.Calloc. The allocations -// could be happening via either Go or jemalloc, depending upon the build flags. -func NumAllocBytes() int64 { - return atomic.LoadInt64(&numBytes) -} - -// MemStats is used to fetch JE Malloc Stats. The stats are fetched from -// the mallctl namespace http://jemalloc.net/jemalloc.3.html#mallctl_namespace. -type MemStats struct { - // Total number of bytes allocated by the application. - // http://jemalloc.net/jemalloc.3.html#stats.allocated - Allocated uint64 - // Total number of bytes in active pages allocated by the application. This - // is a multiple of the page size, and greater than or equal to - // Allocated. - // http://jemalloc.net/jemalloc.3.html#stats.active - Active uint64 - // Maximum number of bytes in physically resident data pages mapped by the - // allocator, comprising all pages dedicated to allocator metadata, pages - // backing active allocations, and unused dirty pages. This is a maximum - // rather than precise because pages may not actually be physically - // resident if they correspond to demand-zeroed virtual memory that has not - // yet been touched. This is a multiple of the page size, and is larger - // than stats.active. - // http://jemalloc.net/jemalloc.3.html#stats.resident - Resident uint64 - // Total number of bytes in virtual memory mappings that were retained - // rather than being returned to the operating system via e.g. munmap(2) or - // similar. Retained virtual memory is typically untouched, decommitted, or - // purged, so it has no strongly associated physical memory (see extent - // hooks http://jemalloc.net/jemalloc.3.html#arena.i.extent_hooks for - // details). Retained memory is excluded from mapped memory statistics, - // e.g. stats.mapped (http://jemalloc.net/jemalloc.3.html#stats.mapped). 
- // http://jemalloc.net/jemalloc.3.html#stats.retained - Retained uint64 -} diff --git a/vendor/github.com/dgraph-io/ristretto/z/calloc_32bit.go b/vendor/github.com/dgraph-io/ristretto/z/calloc_32bit.go deleted file mode 100644 index 3a04426..0000000 --- a/vendor/github.com/dgraph-io/ristretto/z/calloc_32bit.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2020 The LevelDB-Go and Pebble Authors. All rights reserved. Use -// of this source code is governed by a BSD-style license that can be found in -// the LICENSE file. - -// +build 386 amd64p32 arm armbe mips mipsle mips64p32 mips64p32le ppc sparc - -package z - -const ( - // MaxArrayLen is a safe maximum length for slices on this architecture. - MaxArrayLen = 1<<31 - 1 - // MaxBufferSize is the size of virtually unlimited buffer on this architecture. - MaxBufferSize = 1 << 30 -) diff --git a/vendor/github.com/dgraph-io/ristretto/z/calloc_64bit.go b/vendor/github.com/dgraph-io/ristretto/z/calloc_64bit.go deleted file mode 100644 index b898248..0000000 --- a/vendor/github.com/dgraph-io/ristretto/z/calloc_64bit.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2020 The LevelDB-Go and Pebble Authors. All rights reserved. Use -// of this source code is governed by a BSD-style license that can be found in -// the LICENSE file. - -// +build amd64 arm64 arm64be ppc64 ppc64le mips64 mips64le riscv64 s390x sparc64 - -package z - -const ( - // MaxArrayLen is a safe maximum length for slices on this architecture. - MaxArrayLen = 1<<50 - 1 - // MaxBufferSize is the size of virtually unlimited buffer on this architecture. - MaxBufferSize = 256 << 30 -) diff --git a/vendor/github.com/dgraph-io/ristretto/z/calloc_jemalloc.go b/vendor/github.com/dgraph-io/ristretto/z/calloc_jemalloc.go deleted file mode 100644 index 904d73a..0000000 --- a/vendor/github.com/dgraph-io/ristretto/z/calloc_jemalloc.go +++ /dev/null @@ -1,172 +0,0 @@ -// Copyright 2020 The LevelDB-Go and Pebble Authors. All rights reserved. Use -// of this source code is governed by a BSD-style license that can be found in -// the LICENSE file. - -// +build jemalloc - -package z - -/* -#cgo LDFLAGS: /usr/local/lib/libjemalloc.a -L/usr/local/lib -Wl,-rpath,/usr/local/lib -ljemalloc -lm -lstdc++ -pthread -ldl -#include -#include -*/ -import "C" -import ( - "bytes" - "fmt" - "sync" - "sync/atomic" - "unsafe" - - "github.com/dustin/go-humanize" -) - -// The go:linkname directives provides backdoor access to private functions in -// the runtime. Below we're accessing the throw function. - -//go:linkname throw runtime.throw -func throw(s string) - -// New allocates a slice of size n. The returned slice is from manually managed -// memory and MUST be released by calling Free. Failure to do so will result in -// a memory leak. -// -// Compile jemalloc with ./configure --with-jemalloc-prefix="je_" -// https://android.googlesource.com/platform/external/jemalloc_new/+/6840b22e8e11cb68b493297a5cd757d6eaa0b406/TUNING.md -// These two config options seems useful for frequent allocations and deallocations in -// multi-threaded programs (like we have). -// JE_MALLOC_CONF="background_thread:true,metadata_thp:auto" -// -// Compile Go program with `go build -tags=jemalloc` to enable this. - -type dalloc struct { - t string - sz int -} - -var dallocsMu sync.Mutex -var dallocs map[unsafe.Pointer]*dalloc - -func init() { - // By initializing dallocs, we can start tracking allocations and deallocations via z.Calloc. 
- dallocs = make(map[unsafe.Pointer]*dalloc) -} - -func Calloc(n int, tag string) []byte { - if n == 0 { - return make([]byte, 0) - } - // We need to be conscious of the Cgo pointer passing rules: - // - // https://golang.org/cmd/cgo/#hdr-Passing_pointers - // - // ... - // Note: the current implementation has a bug. While Go code is permitted - // to write nil or a C pointer (but not a Go pointer) to C memory, the - // current implementation may sometimes cause a runtime error if the - // contents of the C memory appear to be a Go pointer. Therefore, avoid - // passing uninitialized C memory to Go code if the Go code is going to - // store pointer values in it. Zero out the memory in C before passing it - // to Go. - - ptr := C.je_calloc(C.size_t(n), 1) - if ptr == nil { - // NB: throw is like panic, except it guarantees the process will be - // terminated. The call below is exactly what the Go runtime invokes when - // it cannot allocate memory. - throw("out of memory") - } - - uptr := unsafe.Pointer(ptr) - dallocsMu.Lock() - dallocs[uptr] = &dalloc{ - t: tag, - sz: n, - } - dallocsMu.Unlock() - atomic.AddInt64(&numBytes, int64(n)) - // Interpret the C pointer as a pointer to a Go array, then slice. - return (*[MaxArrayLen]byte)(uptr)[:n:n] -} - -// CallocNoRef does the exact same thing as Calloc with jemalloc enabled. -func CallocNoRef(n int, tag string) []byte { - return Calloc(n, tag) -} - -// Free frees the specified slice. -func Free(b []byte) { - if sz := cap(b); sz != 0 { - b = b[:cap(b)] - ptr := unsafe.Pointer(&b[0]) - C.je_free(ptr) - atomic.AddInt64(&numBytes, -int64(sz)) - dallocsMu.Lock() - delete(dallocs, ptr) - dallocsMu.Unlock() - } -} - -func Leaks() string { - if dallocs == nil { - return "Leak detection disabled. Enable with 'leak' build flag." - } - dallocsMu.Lock() - defer dallocsMu.Unlock() - if len(dallocs) == 0 { - return "NO leaks found." - } - m := make(map[string]int) - for _, da := range dallocs { - m[da.t] += da.sz - } - var buf bytes.Buffer - fmt.Fprintf(&buf, "Allocations:\n") - for f, sz := range m { - fmt.Fprintf(&buf, "%s at file: %s\n", humanize.IBytes(uint64(sz)), f) - } - return buf.String() -} - -// ReadMemStats populates stats with JE Malloc statistics. -func ReadMemStats(stats *MemStats) { - if stats == nil { - return - } - // Call an epoch mallclt to refresh the stats data as mentioned in the docs. - // http://jemalloc.net/jemalloc.3.html#epoch - // Note: This epoch mallctl is as expensive as a malloc call. It takes up the - // malloc_mutex_lock. - epoch := 1 - sz := unsafe.Sizeof(&epoch) - C.je_mallctl( - (C.CString)("epoch"), - unsafe.Pointer(&epoch), - (*C.size_t)(unsafe.Pointer(&sz)), - unsafe.Pointer(&epoch), - (C.size_t)(unsafe.Sizeof(epoch))) - stats.Allocated = fetchStat("stats.allocated") - stats.Active = fetchStat("stats.active") - stats.Resident = fetchStat("stats.resident") - stats.Retained = fetchStat("stats.retained") -} - -// fetchStat is used to read a specific attribute from je malloc stats using mallctl. -func fetchStat(s string) uint64 { - var out uint64 - sz := unsafe.Sizeof(&out) - C.je_mallctl( - (C.CString)(s), // Query: eg: stats.allocated, stats.resident, etc. - unsafe.Pointer(&out), // Variable to store the output. - (*C.size_t)(unsafe.Pointer(&sz)), // Size of the output variable. - nil, // Input variable used to set a value. - 0) // Size of the input variable. 
- return out -} - -func StatsPrint() { - opts := C.CString("mdablxe") - C.je_malloc_stats_print(nil, nil, opts) - C.free(unsafe.Pointer(opts)) -} diff --git a/vendor/github.com/dgraph-io/ristretto/z/calloc_nojemalloc.go b/vendor/github.com/dgraph-io/ristretto/z/calloc_nojemalloc.go deleted file mode 100644 index 93ceedf..0000000 --- a/vendor/github.com/dgraph-io/ristretto/z/calloc_nojemalloc.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2020 The LevelDB-Go and Pebble Authors. All rights reserved. Use -// of this source code is governed by a BSD-style license that can be found in -// the LICENSE file. - -// +build !jemalloc !cgo - -package z - -import ( - "fmt" -) - -// Provides versions of Calloc, CallocNoRef, etc when jemalloc is not available -// (eg: build without jemalloc tag). - -// Calloc allocates a slice of size n. -func Calloc(n int, tag string) []byte { - return make([]byte, n) -} - -// CallocNoRef will not give you memory back without jemalloc. -func CallocNoRef(n int, tag string) []byte { - // We do the add here just to stay compatible with a corresponding Free call. - return nil -} - -// Free does not do anything in this mode. -func Free(b []byte) {} - -func Leaks() string { return "Leaks: Using Go memory" } -func StatsPrint() { - fmt.Println("Using Go memory") -} - -// ReadMemStats doesn't do anything since all the memory is being managed -// by the Go runtime. -func ReadMemStats(_ *MemStats) { return } diff --git a/vendor/github.com/dgraph-io/ristretto/z/file.go b/vendor/github.com/dgraph-io/ristretto/z/file.go deleted file mode 100644 index 880caf0..0000000 --- a/vendor/github.com/dgraph-io/ristretto/z/file.go +++ /dev/null @@ -1,217 +0,0 @@ -/* - * Copyright 2020 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package z - -import ( - "encoding/binary" - "fmt" - "io" - "os" - "path/filepath" - - "github.com/pkg/errors" -) - -// MmapFile represents an mmapd file and includes both the buffer to the data -// and the file descriptor. -type MmapFile struct { - Data []byte - Fd *os.File -} - -var NewFile = errors.New("Create a new file") - -func OpenMmapFileUsing(fd *os.File, sz int, writable bool) (*MmapFile, error) { - filename := fd.Name() - fi, err := fd.Stat() - if err != nil { - return nil, errors.Wrapf(err, "cannot stat file: %s", filename) - } - - var rerr error - fileSize := fi.Size() - if sz > 0 && fileSize == 0 { - // If file is empty, truncate it to sz. - if err := fd.Truncate(int64(sz)); err != nil { - return nil, errors.Wrapf(err, "error while truncation") - } - fileSize = int64(sz) - rerr = NewFile - } - - // fmt.Printf("Mmaping file: %s with writable: %v filesize: %d\n", fd.Name(), writable, fileSize) - buf, err := Mmap(fd, writable, fileSize) // Mmap up to file size. 
- if err != nil { - return nil, errors.Wrapf(err, "while mmapping %s with size: %d", fd.Name(), fileSize) - } - - if fileSize == 0 { - dir, _ := filepath.Split(filename) - go SyncDir(dir) - } - return &MmapFile{ - Data: buf, - Fd: fd, - }, rerr -} - -// OpenMmapFile opens an existing file or creates a new file. If the file is -// created, it would truncate the file to maxSz. In both cases, it would mmap -// the file to maxSz and returned it. In case the file is created, z.NewFile is -// returned. -func OpenMmapFile(filename string, flag int, maxSz int) (*MmapFile, error) { - // fmt.Printf("opening file %s with flag: %v\n", filename, flag) - fd, err := os.OpenFile(filename, flag, 0666) - if err != nil { - return nil, errors.Wrapf(err, "unable to open: %s", filename) - } - writable := true - if flag == os.O_RDONLY { - writable = false - } - return OpenMmapFileUsing(fd, maxSz, writable) -} - -type mmapReader struct { - Data []byte - offset int -} - -func (mr *mmapReader) Read(buf []byte) (int, error) { - if mr.offset > len(mr.Data) { - return 0, io.EOF - } - n := copy(buf, mr.Data[mr.offset:]) - mr.offset += n - if n < len(buf) { - return n, io.EOF - } - return n, nil -} - -func (m *MmapFile) NewReader(offset int) io.Reader { - return &mmapReader{ - Data: m.Data, - offset: offset, - } -} - -// Bytes returns data starting from offset off of size sz. If there's not enough data, it would -// return nil slice and io.EOF. -func (m *MmapFile) Bytes(off, sz int) ([]byte, error) { - if len(m.Data[off:]) < sz { - return nil, io.EOF - } - return m.Data[off : off+sz], nil -} - -// Slice returns the slice at the given offset. -func (m *MmapFile) Slice(offset int) []byte { - sz := binary.BigEndian.Uint32(m.Data[offset:]) - start := offset + 4 - next := start + int(sz) - if next > len(m.Data) { - return []byte{} - } - res := m.Data[start:next] - return res -} - -// AllocateSlice allocates a slice of the given size at the given offset. -func (m *MmapFile) AllocateSlice(sz, offset int) ([]byte, int, error) { - start := offset + 4 - - // If the file is too small, double its size or increase it by 1GB, whichever is smaller. - if start+sz > len(m.Data) { - const oneGB = 1 << 30 - growBy := len(m.Data) - if growBy > oneGB { - growBy = oneGB - } - if growBy < sz+4 { - growBy = sz + 4 - } - if err := m.Truncate(int64(len(m.Data) + growBy)); err != nil { - return nil, 0, err - } - } - - binary.BigEndian.PutUint32(m.Data[offset:], uint32(sz)) - return m.Data[start : start+sz], start + sz, nil -} - -func (m *MmapFile) Sync() error { - if m == nil { - return nil - } - return Msync(m.Data) -} - -func (m *MmapFile) Delete() error { - // Badger can set the m.Data directly, without setting any Fd. In that case, this should be a - // NOOP. - if m.Fd == nil { - return nil - } - - if err := Munmap(m.Data); err != nil { - return fmt.Errorf("while munmap file: %s, error: %v\n", m.Fd.Name(), err) - } - m.Data = nil - if err := m.Fd.Truncate(0); err != nil { - return fmt.Errorf("while truncate file: %s, error: %v\n", m.Fd.Name(), err) - } - if err := m.Fd.Close(); err != nil { - return fmt.Errorf("while close file: %s, error: %v\n", m.Fd.Name(), err) - } - return os.Remove(m.Fd.Name()) -} - -// Close would close the file. It would also truncate the file if maxSz >= 0. -func (m *MmapFile) Close(maxSz int64) error { - // Badger can set the m.Data directly, without setting any Fd. In that case, this should be a - // NOOP. 
- if m.Fd == nil { - return nil - } - if err := m.Sync(); err != nil { - return fmt.Errorf("while sync file: %s, error: %v\n", m.Fd.Name(), err) - } - if err := Munmap(m.Data); err != nil { - return fmt.Errorf("while munmap file: %s, error: %v\n", m.Fd.Name(), err) - } - if maxSz >= 0 { - if err := m.Fd.Truncate(maxSz); err != nil { - return fmt.Errorf("while truncate file: %s, error: %v\n", m.Fd.Name(), err) - } - } - return m.Fd.Close() -} - -func SyncDir(dir string) error { - df, err := os.Open(dir) - if err != nil { - return errors.Wrapf(err, "while opening %s", dir) - } - if err := df.Sync(); err != nil { - return errors.Wrapf(err, "while syncing %s", dir) - } - if err := df.Close(); err != nil { - return errors.Wrapf(err, "while closing %s", dir) - } - return nil -} diff --git a/vendor/github.com/dgraph-io/ristretto/z/file_default.go b/vendor/github.com/dgraph-io/ristretto/z/file_default.go deleted file mode 100644 index d9c0db4..0000000 --- a/vendor/github.com/dgraph-io/ristretto/z/file_default.go +++ /dev/null @@ -1,39 +0,0 @@ -// +build !linux - -/* - * Copyright 2020 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package z - -import "fmt" - -// Truncate would truncate the mmapped file to the given size. On Linux, we truncate -// the underlying file and then call mremap, but on other systems, we unmap first, -// then truncate, then re-map. -func (m *MmapFile) Truncate(maxSz int64) error { - if err := m.Sync(); err != nil { - return fmt.Errorf("while sync file: %s, error: %v\n", m.Fd.Name(), err) - } - if err := Munmap(m.Data); err != nil { - return fmt.Errorf("while munmap file: %s, error: %v\n", m.Fd.Name(), err) - } - if err := m.Fd.Truncate(maxSz); err != nil { - return fmt.Errorf("while truncate file: %s, error: %v\n", m.Fd.Name(), err) - } - var err error - m.Data, err = Mmap(m.Fd, true, maxSz) // Mmap up to max size. - return err -} diff --git a/vendor/github.com/dgraph-io/ristretto/z/file_linux.go b/vendor/github.com/dgraph-io/ristretto/z/file_linux.go deleted file mode 100644 index 7f670bd..0000000 --- a/vendor/github.com/dgraph-io/ristretto/z/file_linux.go +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Copyright 2020 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package z - -import ( - "fmt" -) - -// Truncate would truncate the mmapped file to the given size. 
On Linux, we truncate -// the underlying file and then call mremap, but on other systems, we unmap first, -// then truncate, then re-map. -func (m *MmapFile) Truncate(maxSz int64) error { - if err := m.Sync(); err != nil { - return fmt.Errorf("while sync file: %s, error: %v\n", m.Fd.Name(), err) - } - if err := m.Fd.Truncate(maxSz); err != nil { - return fmt.Errorf("while truncate file: %s, error: %v\n", m.Fd.Name(), err) - } - - var err error - m.Data, err = mremap(m.Data, int(maxSz)) // Mmap up to max size. - return err -} diff --git a/vendor/github.com/dgraph-io/ristretto/z/flags.go b/vendor/github.com/dgraph-io/ristretto/z/flags.go deleted file mode 100644 index a55c474..0000000 --- a/vendor/github.com/dgraph-io/ristretto/z/flags.go +++ /dev/null @@ -1,311 +0,0 @@ -package z - -import ( - "fmt" - "os" - "os/user" - "path/filepath" - "sort" - "strconv" - "strings" - "time" - - "github.com/golang/glog" - "github.com/pkg/errors" -) - -// SuperFlagHelp makes it really easy to generate command line `--help` output for a SuperFlag. For -// example: -// -// const flagDefaults = `enabled=true; path=some/path;` -// -// var help string = z.NewSuperFlagHelp(flagDefaults). -// Flag("enabled", "Turns on ."). -// Flag("path", "The path to ."). -// Flag("another", "Not present in defaults, but still included."). -// String() -// -// The `help` string would then contain: -// -// enabled=true; Turns on . -// path=some/path; The path to . -// another=; Not present in defaults, but still included. -// -// All flags are sorted alphabetically for consistent `--help` output. Flags with default values are -// placed at the top, and everything else goes under. -type SuperFlagHelp struct { - head string - defaults *SuperFlag - flags map[string]string -} - -func NewSuperFlagHelp(defaults string) *SuperFlagHelp { - return &SuperFlagHelp{ - defaults: NewSuperFlag(defaults), - flags: make(map[string]string, 0), - } -} - -func (h *SuperFlagHelp) Head(head string) *SuperFlagHelp { - h.head = head - return h -} - -func (h *SuperFlagHelp) Flag(name, description string) *SuperFlagHelp { - h.flags[name] = description - return h -} - -func (h *SuperFlagHelp) String() string { - defaultLines := make([]string, 0) - otherLines := make([]string, 0) - for name, help := range h.flags { - val, found := h.defaults.m[name] - line := fmt.Sprintf(" %s=%s; %s\n", name, val, help) - if found { - defaultLines = append(defaultLines, line) - } else { - otherLines = append(otherLines, line) - } - } - sort.Strings(defaultLines) - sort.Strings(otherLines) - dls := strings.Join(defaultLines, "") - ols := strings.Join(otherLines, "") - if len(h.defaults.m) == 0 && len(ols) == 0 { - // remove last newline - dls = dls[:len(dls)-1] - } - // remove last newline - if len(h.defaults.m) == 0 && len(ols) > 1 { - ols = ols[:len(ols)-1] - } - return h.head + "\n" + dls + ols -} - -func parseFlag(flag string) (map[string]string, error) { - kvm := make(map[string]string) - for _, kv := range strings.Split(flag, ";") { - if strings.TrimSpace(kv) == "" { - continue - } - // For a non-empty separator, 0 < len(splits) ≤ 2. 
- splits := strings.SplitN(kv, "=", 2) - k := strings.TrimSpace(splits[0]) - if len(splits) < 2 { - return nil, fmt.Errorf("superflag: missing value for '%s' in flag: %s", k, flag) - } - k = strings.ToLower(k) - k = strings.ReplaceAll(k, "_", "-") - kvm[k] = strings.TrimSpace(splits[1]) - } - return kvm, nil -} - -type SuperFlag struct { - m map[string]string -} - -func NewSuperFlag(flag string) *SuperFlag { - sf, err := newSuperFlagImpl(flag) - if err != nil { - glog.Fatal(err) - } - return sf -} - -func newSuperFlagImpl(flag string) (*SuperFlag, error) { - m, err := parseFlag(flag) - if err != nil { - return nil, err - } - return &SuperFlag{m}, nil -} - -func (sf *SuperFlag) String() string { - if sf == nil { - return "" - } - kvs := make([]string, 0, len(sf.m)) - for k, v := range sf.m { - kvs = append(kvs, fmt.Sprintf("%s=%s", k, v)) - } - return strings.Join(kvs, "; ") -} - -func (sf *SuperFlag) MergeAndCheckDefault(flag string) *SuperFlag { - sf, err := sf.mergeAndCheckDefaultImpl(flag) - if err != nil { - glog.Fatal(err) - } - return sf -} - -func (sf *SuperFlag) mergeAndCheckDefaultImpl(flag string) (*SuperFlag, error) { - if sf == nil { - m, err := parseFlag(flag) - if err != nil { - return nil, err - } - return &SuperFlag{m}, nil - } - - src, err := parseFlag(flag) - if err != nil { - return nil, err - } - - numKeys := len(sf.m) - for k := range src { - if _, ok := sf.m[k]; ok { - numKeys-- - } - } - if numKeys != 0 { - return nil, fmt.Errorf("superflag: found invalid options in flag: %s.\nvalid options: %v", sf, flag) - } - for k, v := range src { - if _, ok := sf.m[k]; !ok { - sf.m[k] = v - } - } - return sf, nil -} - -func (sf *SuperFlag) Has(opt string) bool { - val := sf.GetString(opt) - return val != "" -} - -func (sf *SuperFlag) GetDuration(opt string) time.Duration { - val := sf.GetString(opt) - if val == "" { - return time.Duration(0) - } - if strings.Contains(val, "d") { - val = strings.Replace(val, "d", "", 1) - days, err := strconv.ParseUint(val, 0, 64) - if err != nil { - return time.Duration(0) - } - return time.Hour * 24 * time.Duration(days) - } - d, err := time.ParseDuration(val) - if err != nil { - return time.Duration(0) - } - return d -} - -func (sf *SuperFlag) GetBool(opt string) bool { - val := sf.GetString(opt) - if val == "" { - return false - } - b, err := strconv.ParseBool(val) - if err != nil { - err = errors.Wrapf(err, - "Unable to parse %s as bool for key: %s. Options: %s\n", - val, opt, sf) - glog.Fatalf("%+v", err) - } - return b -} - -func (sf *SuperFlag) GetFloat64(opt string) float64 { - val := sf.GetString(opt) - if val == "" { - return 0 - } - f, err := strconv.ParseFloat(val, 64) - if err != nil { - err = errors.Wrapf(err, - "Unable to parse %s as float64 for key: %s. Options: %s\n", - val, opt, sf) - glog.Fatalf("%+v", err) - } - return f -} - -func (sf *SuperFlag) GetInt64(opt string) int64 { - val := sf.GetString(opt) - if val == "" { - return 0 - } - i, err := strconv.ParseInt(val, 0, 64) - if err != nil { - err = errors.Wrapf(err, - "Unable to parse %s as int64 for key: %s. Options: %s\n", - val, opt, sf) - glog.Fatalf("%+v", err) - } - return i -} - -func (sf *SuperFlag) GetUint64(opt string) uint64 { - val := sf.GetString(opt) - if val == "" { - return 0 - } - u, err := strconv.ParseUint(val, 0, 64) - if err != nil { - err = errors.Wrapf(err, - "Unable to parse %s as uint64 for key: %s. 
Options: %s\n", - val, opt, sf) - glog.Fatalf("%+v", err) - } - return u -} - -func (sf *SuperFlag) GetUint32(opt string) uint32 { - val := sf.GetString(opt) - if val == "" { - return 0 - } - u, err := strconv.ParseUint(val, 0, 32) - if err != nil { - err = errors.Wrapf(err, - "Unable to parse %s as uint32 for key: %s. Options: %s\n", - val, opt, sf) - glog.Fatalf("%+v", err) - } - return uint32(u) -} - -func (sf *SuperFlag) GetString(opt string) string { - if sf == nil { - return "" - } - return sf.m[opt] -} - -func (sf *SuperFlag) GetPath(opt string) string { - p := sf.GetString(opt) - path, err := expandPath(p) - if err != nil { - glog.Fatalf("Failed to get path: %+v", err) - } - return path -} - -// expandPath expands the paths containing ~ to /home/user. It also computes the absolute path -// from the relative paths. For example: ~/abc/../cef will be transformed to /home/user/cef. -func expandPath(path string) (string, error) { - if len(path) == 0 { - return "", nil - } - if path[0] == '~' && (len(path) == 1 || os.IsPathSeparator(path[1])) { - usr, err := user.Current() - if err != nil { - return "", errors.Wrap(err, "Failed to get the home directory of the user") - } - path = filepath.Join(usr.HomeDir, path[1:]) - } - - var err error - path, err = filepath.Abs(path) - if err != nil { - return "", errors.Wrap(err, "Failed to generate absolute path") - } - return path, nil -} diff --git a/vendor/github.com/dgraph-io/ristretto/z/histogram.go b/vendor/github.com/dgraph-io/ristretto/z/histogram.go deleted file mode 100644 index 4eb0c4f..0000000 --- a/vendor/github.com/dgraph-io/ristretto/z/histogram.go +++ /dev/null @@ -1,205 +0,0 @@ -/* - * Copyright 2020 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package z - -import ( - "fmt" - "math" - "strings" - - "github.com/dustin/go-humanize" -) - -// Creates bounds for an histogram. The bounds are powers of two of the form -// [2^min_exponent, ..., 2^max_exponent]. -func HistogramBounds(minExponent, maxExponent uint32) []float64 { - var bounds []float64 - for i := minExponent; i <= maxExponent; i++ { - bounds = append(bounds, float64(int(1)< 4) - bounds := make([]float64, num) - bounds[0] = 1 - bounds[1] = 2 - for i := 2; i < num; i++ { - bounds[i] = bounds[i-1] + bounds[i-2] - } - return bounds -} - -// HistogramData stores the information needed to represent the sizes of the keys and values -// as a histogram. -type HistogramData struct { - Bounds []float64 - Count int64 - CountPerBucket []int64 - Min int64 - Max int64 - Sum int64 -} - -// NewHistogramData returns a new instance of HistogramData with properly initialized fields. 
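For reference, the SuperFlag helper deleted just above parses semicolon-separated key=value option strings into typed getters. A minimal usage sketch with made-up option names and values, using only the methods shown in the removed file:

```go
package main

import (
	"fmt"

	"github.com/dgraph-io/ristretto/z"
)

func main() {
	// Format: "key=value; key=value; ...". Keys are lower-cased and "_" becomes "-".
	sf := z.NewSuperFlag("enabled=true; max-size=64; refresh=2d")

	fmt.Println(sf.GetBool("enabled"))     // true
	fmt.Println(sf.GetInt64("max-size"))   // 64
	fmt.Println(sf.GetDuration("refresh")) // 48h0m0s (a trailing "d" is read as days)
	fmt.Println(sf.Has("missing"))         // false
}
```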
-func NewHistogramData(bounds []float64) *HistogramData { - return &HistogramData{ - Bounds: bounds, - CountPerBucket: make([]int64, len(bounds)+1), - Max: 0, - Min: math.MaxInt64, - } -} - -func (histogram *HistogramData) Copy() *HistogramData { - if histogram == nil { - return nil - } - return &HistogramData{ - Bounds: append([]float64{}, histogram.Bounds...), - CountPerBucket: append([]int64{}, histogram.CountPerBucket...), - Count: histogram.Count, - Min: histogram.Min, - Max: histogram.Max, - Sum: histogram.Sum, - } -} - -// Update changes the Min and Max fields if value is less than or greater than the current values. -func (histogram *HistogramData) Update(value int64) { - if histogram == nil { - return - } - if value > histogram.Max { - histogram.Max = value - } - if value < histogram.Min { - histogram.Min = value - } - - histogram.Sum += value - histogram.Count++ - - for index := 0; index <= len(histogram.Bounds); index++ { - // Allocate value in the last buckets if we reached the end of the Bounds array. - if index == len(histogram.Bounds) { - histogram.CountPerBucket[index]++ - break - } - - if value < int64(histogram.Bounds[index]) { - histogram.CountPerBucket[index]++ - break - } - } -} - -// Mean returns the mean value for the histogram. -func (histogram *HistogramData) Mean() float64 { - if histogram.Count == 0 { - return 0 - } - return float64(histogram.Sum) / float64(histogram.Count) -} - -// String converts the histogram data into human-readable string. -func (histogram *HistogramData) String() string { - if histogram == nil { - return "" - } - var b strings.Builder - - b.WriteString("\n -- Histogram: \n") - b.WriteString(fmt.Sprintf("Min value: %d \n", histogram.Min)) - b.WriteString(fmt.Sprintf("Max value: %d \n", histogram.Max)) - b.WriteString(fmt.Sprintf("Count: %d \n", histogram.Count)) - b.WriteString(fmt.Sprintf("50p: %.2f \n", histogram.Percentile(0.5))) - b.WriteString(fmt.Sprintf("75p: %.2f \n", histogram.Percentile(0.75))) - b.WriteString(fmt.Sprintf("90p: %.2f \n", histogram.Percentile(0.90))) - - numBounds := len(histogram.Bounds) - var cum float64 - for index, count := range histogram.CountPerBucket { - if count == 0 { - continue - } - - // The last bucket represents the bucket that contains the range from - // the last bound up to infinity so it's processed differently than the - // other buckets. - if index == len(histogram.CountPerBucket)-1 { - lowerBound := uint64(histogram.Bounds[numBounds-1]) - page := float64(count*100) / float64(histogram.Count) - cum += page - b.WriteString(fmt.Sprintf("[%s, %s) %d %.2f%% %.2f%%\n", - humanize.IBytes(lowerBound), "infinity", count, page, cum)) - continue - } - - upperBound := uint64(histogram.Bounds[index]) - lowerBound := uint64(0) - if index > 0 { - lowerBound = uint64(histogram.Bounds[index-1]) - } - - page := float64(count*100) / float64(histogram.Count) - cum += page - b.WriteString(fmt.Sprintf("[%d, %d) %d %.2f%% %.2f%%\n", - lowerBound, upperBound, count, page, cum)) - } - b.WriteString(" --\n") - return b.String() -} - -// Percentile returns the percentile value for the histogram. 
-// value of p should be between [0.0-1.0] -func (histogram *HistogramData) Percentile(p float64) float64 { - if histogram == nil { - return 0 - } - - if histogram.Count == 0 { - // if no data return the minimum range - return histogram.Bounds[0] - } - pval := int64(float64(histogram.Count) * p) - for i, v := range histogram.CountPerBucket { - pval = pval - v - if pval <= 0 { - if i == len(histogram.Bounds) { - break - } - return histogram.Bounds[i] - } - } - // default return should be the max range - return histogram.Bounds[len(histogram.Bounds)-1] -} - -// Clear reset the histogram. Helpful in situations where we need to reset the metrics -func (histogram *HistogramData) Clear() { - if histogram == nil { - return - } - - histogram.Count = 0 - histogram.CountPerBucket = make([]int64, len(histogram.Bounds)+1) - histogram.Sum = 0 - histogram.Max = 0 - histogram.Min = math.MaxInt64 -} diff --git a/vendor/github.com/dgraph-io/ristretto/z/mmap.go b/vendor/github.com/dgraph-io/ristretto/z/mmap.go deleted file mode 100644 index 9b02510..0000000 --- a/vendor/github.com/dgraph-io/ristretto/z/mmap.go +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright 2019 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package z - -import ( - "os" -) - -// Mmap uses the mmap system call to memory-map a file. If writable is true, -// memory protection of the pages is set so that they may be written to as well. -func Mmap(fd *os.File, writable bool, size int64) ([]byte, error) { - return mmap(fd, writable, size) -} - -// Munmap unmaps a previously mapped slice. -func Munmap(b []byte) error { - return munmap(b) -} - -// Madvise uses the madvise system call to give advise about the use of memory -// when using a slice that is memory-mapped to a file. Set the readahead flag to -// false if page references are expected in random order. -func Madvise(b []byte, readahead bool) error { - return madvise(b, readahead) -} - -// Msync would call sync on the mmapped data. -func Msync(b []byte) error { - return msync(b) -} diff --git a/vendor/github.com/dgraph-io/ristretto/z/mmap_darwin.go b/vendor/github.com/dgraph-io/ristretto/z/mmap_darwin.go deleted file mode 100644 index 4d6d74f..0000000 --- a/vendor/github.com/dgraph-io/ristretto/z/mmap_darwin.go +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Copyright 2019 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package z - -import ( - "os" - "syscall" - "unsafe" - - "golang.org/x/sys/unix" -) - -// Mmap uses the mmap system call to memory-map a file. If writable is true, -// memory protection of the pages is set so that they may be written to as well. -func mmap(fd *os.File, writable bool, size int64) ([]byte, error) { - mtype := unix.PROT_READ - if writable { - mtype |= unix.PROT_WRITE - } - return unix.Mmap(int(fd.Fd()), 0, int(size), mtype, unix.MAP_SHARED) -} - -// Munmap unmaps a previously mapped slice. -func munmap(b []byte) error { - return unix.Munmap(b) -} - -// This is required because the unix package does not support the madvise system call on OS X. -func madvise(b []byte, readahead bool) error { - advice := unix.MADV_NORMAL - if !readahead { - advice = unix.MADV_RANDOM - } - - _, _, e1 := syscall.Syscall(syscall.SYS_MADVISE, uintptr(unsafe.Pointer(&b[0])), - uintptr(len(b)), uintptr(advice)) - if e1 != 0 { - return e1 - } - return nil -} - -func msync(b []byte) error { - return unix.Msync(b, unix.MS_SYNC) -} diff --git a/vendor/github.com/dgraph-io/ristretto/z/mmap_linux.go b/vendor/github.com/dgraph-io/ristretto/z/mmap_linux.go deleted file mode 100644 index 331330c..0000000 --- a/vendor/github.com/dgraph-io/ristretto/z/mmap_linux.go +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Copyright 2020 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package z - -import ( - "os" - "unsafe" - - "golang.org/x/sys/unix" -) - -// mmap uses the mmap system call to memory-map a file. If writable is true, -// memory protection of the pages is set so that they may be written to as well. -func mmap(fd *os.File, writable bool, size int64) ([]byte, error) { - mtype := unix.PROT_READ - if writable { - mtype |= unix.PROT_WRITE - } - return unix.Mmap(int(fd.Fd()), 0, int(size), mtype, unix.MAP_SHARED) -} - -// munmap unmaps a previously mapped slice. -// -// unix.Munmap maintains an internal list of mmapped addresses, and only calls munmap -// if the address is present in that list. If we use mremap, this list is not updated. -// To bypass this, we call munmap ourselves. -func munmap(data []byte) error { - if len(data) == 0 || len(data) != cap(data) { - return unix.EINVAL - } - _, _, errno := unix.Syscall( - unix.SYS_MUNMAP, - uintptr(unsafe.Pointer(&data[0])), - uintptr(len(data)), - 0, - ) - if errno != 0 { - return errno - } - return nil -} - -// madvise uses the madvise system call to give advise about the use of memory -// when using a slice that is memory-mapped to a file. Set the readahead flag to -// false if page references are expected in random order. -func madvise(b []byte, readahead bool) error { - flags := unix.MADV_NORMAL - if !readahead { - flags = unix.MADV_RANDOM - } - return unix.Madvise(b, flags) -} - -// msync writes any modified data to persistent storage. 
-func msync(b []byte) error { - return unix.Msync(b, unix.MS_SYNC) -} diff --git a/vendor/github.com/dgraph-io/ristretto/z/mmap_plan9.go b/vendor/github.com/dgraph-io/ristretto/z/mmap_plan9.go deleted file mode 100644 index f307296..0000000 --- a/vendor/github.com/dgraph-io/ristretto/z/mmap_plan9.go +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright 2020 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package z - -import ( - "os" - "syscall" -) - -// Mmap uses the mmap system call to memory-map a file. If writable is true, -// memory protection of the pages is set so that they may be written to as well. -func mmap(fd *os.File, writable bool, size int64) ([]byte, error) { - return nil, syscall.EPLAN9 -} - -// Munmap unmaps a previously mapped slice. -func munmap(b []byte) error { - return syscall.EPLAN9 -} - -// Madvise uses the madvise system call to give advise about the use of memory -// when using a slice that is memory-mapped to a file. Set the readahead flag to -// false if page references are expected in random order. -func madvise(b []byte, readahead bool) error { - return syscall.EPLAN9 -} - -func msync(b []byte) error { - return syscall.EPLAN9 -} diff --git a/vendor/github.com/dgraph-io/ristretto/z/mmap_unix.go b/vendor/github.com/dgraph-io/ristretto/z/mmap_unix.go deleted file mode 100644 index e8b2699..0000000 --- a/vendor/github.com/dgraph-io/ristretto/z/mmap_unix.go +++ /dev/null @@ -1,55 +0,0 @@ -// +build !windows,!darwin,!plan9,!linux - -/* - * Copyright 2019 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package z - -import ( - "os" - - "golang.org/x/sys/unix" -) - -// Mmap uses the mmap system call to memory-map a file. If writable is true, -// memory protection of the pages is set so that they may be written to as well. -func mmap(fd *os.File, writable bool, size int64) ([]byte, error) { - mtype := unix.PROT_READ - if writable { - mtype |= unix.PROT_WRITE - } - return unix.Mmap(int(fd.Fd()), 0, int(size), mtype, unix.MAP_SHARED) -} - -// Munmap unmaps a previously mapped slice. -func munmap(b []byte) error { - return unix.Munmap(b) -} - -// Madvise uses the madvise system call to give advise about the use of memory -// when using a slice that is memory-mapped to a file. Set the readahead flag to -// false if page references are expected in random order. 
-func madvise(b []byte, readahead bool) error { - flags := unix.MADV_NORMAL - if !readahead { - flags = unix.MADV_RANDOM - } - return unix.Madvise(b, flags) -} - -func msync(b []byte) error { - return unix.Msync(b, unix.MS_SYNC) -} diff --git a/vendor/github.com/dgraph-io/ristretto/z/mmap_windows.go b/vendor/github.com/dgraph-io/ristretto/z/mmap_windows.go deleted file mode 100644 index 171176e..0000000 --- a/vendor/github.com/dgraph-io/ristretto/z/mmap_windows.go +++ /dev/null @@ -1,96 +0,0 @@ -// +build windows - -/* - * Copyright 2019 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package z - -import ( - "fmt" - "os" - "syscall" - "unsafe" -) - -func mmap(fd *os.File, write bool, size int64) ([]byte, error) { - protect := syscall.PAGE_READONLY - access := syscall.FILE_MAP_READ - - if write { - protect = syscall.PAGE_READWRITE - access = syscall.FILE_MAP_WRITE - } - fi, err := fd.Stat() - if err != nil { - return nil, err - } - - // In windows, we cannot mmap a file more than it's actual size. - // So truncate the file to the size of the mmap. - if fi.Size() < size { - if err := fd.Truncate(size); err != nil { - return nil, fmt.Errorf("truncate: %s", err) - } - } - - // Open a file mapping handle. - sizelo := uint32(size >> 32) - sizehi := uint32(size) & 0xffffffff - - handler, err := syscall.CreateFileMapping(syscall.Handle(fd.Fd()), nil, - uint32(protect), sizelo, sizehi, nil) - if err != nil { - return nil, os.NewSyscallError("CreateFileMapping", err) - } - - // Create the memory map. - addr, err := syscall.MapViewOfFile(handler, uint32(access), 0, 0, uintptr(size)) - if addr == 0 { - return nil, os.NewSyscallError("MapViewOfFile", err) - } - - // Close mapping handle. - if err := syscall.CloseHandle(syscall.Handle(handler)); err != nil { - return nil, os.NewSyscallError("CloseHandle", err) - } - - // Slice memory layout - // Copied this snippet from golang/sys package - var sl = struct { - addr uintptr - len int - cap int - }{addr, int(size), int(size)} - - // Use unsafe to turn sl into a []byte. - data := *(*[]byte)(unsafe.Pointer(&sl)) - - return data, nil -} - -func munmap(b []byte) error { - return syscall.UnmapViewOfFile(uintptr(unsafe.Pointer(&b[0]))) -} - -func madvise(b []byte, readahead bool) error { - // Do Nothing. We don’t care about this setting on Windows - return nil -} - -func msync(b []byte) error { - // TODO: Figure out how to do msync on Windows. - return nil -} diff --git a/vendor/github.com/dgraph-io/ristretto/z/mremap_linux.go b/vendor/github.com/dgraph-io/ristretto/z/mremap_linux.go deleted file mode 100644 index 2256786..0000000 --- a/vendor/github.com/dgraph-io/ristretto/z/mremap_linux.go +++ /dev/null @@ -1,56 +0,0 @@ -// +build !arm64 - -/* - * Copyright 2020 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package z - -import ( - "fmt" - "reflect" - "unsafe" - - "golang.org/x/sys/unix" -) - -// mremap is a Linux-specific system call to remap pages in memory. This can be used in place of munmap + mmap. -func mremap(data []byte, size int) ([]byte, error) { - //nolint:lll - // taken from - const MREMAP_MAYMOVE = 0x1 - - header := (*reflect.SliceHeader)(unsafe.Pointer(&data)) - mmapAddr, mmapSize, errno := unix.Syscall6( - unix.SYS_MREMAP, - header.Data, - uintptr(header.Len), - uintptr(size), - uintptr(MREMAP_MAYMOVE), - 0, - 0, - ) - if errno != 0 { - return nil, errno - } - if mmapSize != uintptr(size) { - return nil, fmt.Errorf("mremap size mismatch: requested: %d got: %d", size, mmapSize) - } - - header.Data = mmapAddr - header.Cap = size - header.Len = size - return data, nil -} diff --git a/vendor/github.com/dgraph-io/ristretto/z/mremap_linux_arm64.go b/vendor/github.com/dgraph-io/ristretto/z/mremap_linux_arm64.go deleted file mode 100644 index 09683cd..0000000 --- a/vendor/github.com/dgraph-io/ristretto/z/mremap_linux_arm64.go +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Copyright 2020 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package z - -import ( - "reflect" - "unsafe" - - "golang.org/x/sys/unix" -) - -// mremap is a Linux-specific system call to remap pages in memory. This can be used in place of munmap + mmap. 
-func mremap(data []byte, size int) ([]byte, error) { - //nolint:lll - // taken from - const MREMAP_MAYMOVE = 0x1 - - header := (*reflect.SliceHeader)(unsafe.Pointer(&data)) - // For ARM64, the second return argument for SYS_MREMAP is inconsistent (prior allocated size) with - // other architectures, which return the size allocated - mmapAddr, _, errno := unix.Syscall6( - unix.SYS_MREMAP, - header.Data, - uintptr(header.Len), - uintptr(size), - uintptr(MREMAP_MAYMOVE), - 0, - 0, - ) - if errno != 0 { - return nil, errno - } - - header.Data = mmapAddr - header.Cap = size - header.Len = size - return data, nil -} diff --git a/vendor/github.com/dgraph-io/ristretto/z/rtutil.go b/vendor/github.com/dgraph-io/ristretto/z/rtutil.go deleted file mode 100644 index 8f317c8..0000000 --- a/vendor/github.com/dgraph-io/ristretto/z/rtutil.go +++ /dev/null @@ -1,75 +0,0 @@ -// MIT License - -// Copyright (c) 2019 Ewan Chou - -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: - -// The above copyright notice and this permission notice shall be included in all -// copies or substantial portions of the Software. - -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -// SOFTWARE. - -package z - -import ( - "unsafe" -) - -// NanoTime returns the current time in nanoseconds from a monotonic clock. -//go:linkname NanoTime runtime.nanotime -func NanoTime() int64 - -// CPUTicks is a faster alternative to NanoTime to measure time duration. -//go:linkname CPUTicks runtime.cputicks -func CPUTicks() int64 - -type stringStruct struct { - str unsafe.Pointer - len int -} - -//go:noescape -//go:linkname memhash runtime.memhash -func memhash(p unsafe.Pointer, h, s uintptr) uintptr - -// MemHash is the hash function used by go map, it utilizes available hardware instructions(behaves -// as aeshash if aes instruction is available). -// NOTE: The hash seed changes for every process. So, this cannot be used as a persistent hash. -func MemHash(data []byte) uint64 { - ss := (*stringStruct)(unsafe.Pointer(&data)) - return uint64(memhash(ss.str, 0, uintptr(ss.len))) -} - -// MemHashString is the hash function used by go map, it utilizes available hardware instructions -// (behaves as aeshash if aes instruction is available). -// NOTE: The hash seed changes for every process. So, this cannot be used as a persistent hash. -func MemHashString(str string) uint64 { - ss := (*stringStruct)(unsafe.Pointer(&str)) - return uint64(memhash(ss.str, 0, uintptr(ss.len))) -} - -// FastRand is a fast thread local random function. 
-//go:linkname FastRand runtime.fastrand -func FastRand() uint32 - -//go:linkname memclrNoHeapPointers runtime.memclrNoHeapPointers -func memclrNoHeapPointers(p unsafe.Pointer, n uintptr) - -func Memclr(b []byte) { - if len(b) == 0 { - return - } - p := unsafe.Pointer(&b[0]) - memclrNoHeapPointers(p, uintptr(len(b))) -} diff --git a/vendor/github.com/dgraph-io/ristretto/z/rtutil.s b/vendor/github.com/dgraph-io/ristretto/z/rtutil.s deleted file mode 100644 index e69de29..0000000 diff --git a/vendor/github.com/dgraph-io/ristretto/z/simd/baseline.go b/vendor/github.com/dgraph-io/ristretto/z/simd/baseline.go deleted file mode 100644 index 967e3a3..0000000 --- a/vendor/github.com/dgraph-io/ristretto/z/simd/baseline.go +++ /dev/null @@ -1,127 +0,0 @@ -package simd - -import ( - "fmt" - "runtime" - "sort" - "sync" -) - -// Search finds the key using the naive way -func Naive(xs []uint64, k uint64) int16 { - var i int - for i = 0; i < len(xs); i += 2 { - x := xs[i] - if x >= k { - return int16(i / 2) - } - } - return int16(i / 2) -} - -func Clever(xs []uint64, k uint64) int16 { - if len(xs) < 8 { - return Naive(xs, k) - } - var twos, pk [4]uint64 - pk[0] = k - pk[1] = k - pk[2] = k - pk[3] = k - for i := 0; i < len(xs); i += 8 { - twos[0] = xs[i] - twos[1] = xs[i+2] - twos[2] = xs[i+4] - twos[3] = xs[i+6] - if twos[0] >= pk[0] { - return int16(i / 2) - } - if twos[1] >= pk[1] { - return int16((i + 2) / 2) - } - if twos[2] >= pk[2] { - return int16((i + 4) / 2) - } - if twos[3] >= pk[3] { - return int16((i + 6) / 2) - } - - } - return int16(len(xs) / 2) -} - -func Parallel(xs []uint64, k uint64) int16 { - cpus := runtime.NumCPU() - if cpus%2 != 0 { - panic(fmt.Sprintf("odd number of CPUs %v", cpus)) - } - sz := len(xs)/cpus + 1 - var wg sync.WaitGroup - retChan := make(chan int16, cpus) - for i := 0; i < len(xs); i += sz { - end := i + sz - if end >= len(xs) { - end = len(xs) - } - chunk := xs[i:end] - wg.Add(1) - go func(hd int16, xs []uint64, k uint64, wg *sync.WaitGroup, ch chan int16) { - for i := 0; i < len(xs); i += 2 { - if xs[i] >= k { - ch <- (int16(i) + hd) / 2 - break - } - } - wg.Done() - }(int16(i), chunk, k, &wg, retChan) - } - wg.Wait() - close(retChan) - var min int16 = (1 << 15) - 1 - for i := range retChan { - if i < min { - min = i - } - } - if min == (1<<15)-1 { - return int16(len(xs) / 2) - } - return min -} - -func Binary(keys []uint64, key uint64) int16 { - return int16(sort.Search(len(keys), func(i int) bool { - if i*2 >= len(keys) { - return true - } - return keys[i*2] >= key - })) -} - -func cmp2_native(twos, pk [2]uint64) int16 { - if twos[0] == pk[0] { - return 0 - } - if twos[1] == pk[1] { - return 1 - } - return 2 -} - -func cmp4_native(fours, pk [4]uint64) int16 { - for i := range fours { - if fours[i] >= pk[i] { - return int16(i) - } - } - return 4 -} - -func cmp8_native(a [8]uint64, pk [4]uint64) int16 { - for i := range a { - if a[i] >= pk[0] { - return int16(i) - } - } - return 8 -} diff --git a/vendor/github.com/dgraph-io/ristretto/z/simd/search.go b/vendor/github.com/dgraph-io/ristretto/z/simd/search.go deleted file mode 100644 index b1e6392..0000000 --- a/vendor/github.com/dgraph-io/ristretto/z/simd/search.go +++ /dev/null @@ -1,51 +0,0 @@ -// +build !amd64 - -/* - * Copyright 2020 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package simd - -// Search uses the Clever search to find the correct key. -func Search(xs []uint64, k uint64) int16 { - if len(xs) < 8 || (len(xs) % 8 != 0) { - return Naive(xs, k) - } - var twos, pk [4]uint64 - pk[0] = k - pk[1] = k - pk[2] = k - pk[3] = k - for i := 0; i < len(xs); i += 8 { - twos[0] = xs[i] - twos[1] = xs[i+2] - twos[2] = xs[i+4] - twos[3] = xs[i+6] - if twos[0] >= pk[0] { - return int16(i / 2) - } - if twos[1] >= pk[1] { - return int16((i + 2) / 2) - } - if twos[2] >= pk[2] { - return int16((i + 4) / 2) - } - if twos[3] >= pk[3] { - return int16((i + 6) / 2) - } - - } - return int16(len(xs) / 2) -} diff --git a/vendor/github.com/dgraph-io/ristretto/z/simd/search_amd64.s b/vendor/github.com/dgraph-io/ristretto/z/simd/search_amd64.s deleted file mode 100644 index 150c846..0000000 --- a/vendor/github.com/dgraph-io/ristretto/z/simd/search_amd64.s +++ /dev/null @@ -1,60 +0,0 @@ -// Code generated by command: go run asm2.go -out search_amd64.s -stubs stub_search_amd64.go. DO NOT EDIT. - -#include "textflag.h" - -// func Search(xs []uint64, k uint64) int16 -TEXT ·Search(SB), NOSPLIT, $0-34 - MOVQ xs_base+0(FP), AX - MOVQ xs_len+8(FP), CX - MOVQ k+24(FP), DX - - // Save n - MOVQ CX, BX - - // Initialize idx register to zero. - XORL BP, BP - -loop: - // Unroll1 - CMPQ (AX)(BP*8), DX - JAE Found - - // Unroll2 - CMPQ 16(AX)(BP*8), DX - JAE Found2 - - // Unroll3 - CMPQ 32(AX)(BP*8), DX - JAE Found3 - - // Unroll4 - CMPQ 48(AX)(BP*8), DX - JAE Found4 - - // plus8 - ADDQ $0x08, BP - CMPQ BP, CX - JB loop - JMP NotFound - -Found2: - ADDL $0x02, BP - JMP Found - -Found3: - ADDL $0x04, BP - JMP Found - -Found4: - ADDL $0x06, BP - -Found: - MOVL BP, BX - -NotFound: - MOVL BX, BP - SHRL $0x1f, BP - ADDL BX, BP - SHRL $0x01, BP - MOVL BP, ret+32(FP) - RET diff --git a/vendor/github.com/dgraph-io/ristretto/z/simd/stub_search_amd64.go b/vendor/github.com/dgraph-io/ristretto/z/simd/stub_search_amd64.go deleted file mode 100644 index 0821d38..0000000 --- a/vendor/github.com/dgraph-io/ristretto/z/simd/stub_search_amd64.go +++ /dev/null @@ -1,6 +0,0 @@ -// Code generated by command: go run asm2.go -out search_amd64.s -stubs stub_search_amd64.go. DO NOT EDIT. - -package simd - -// Search finds the first idx for which xs[idx] >= k in xs. -func Search(xs []uint64, k uint64) int16 diff --git a/vendor/github.com/dgraph-io/ristretto/z/z.go b/vendor/github.com/dgraph-io/ristretto/z/z.go deleted file mode 100644 index 9745558..0000000 --- a/vendor/github.com/dgraph-io/ristretto/z/z.go +++ /dev/null @@ -1,151 +0,0 @@ -/* - * Copyright 2019 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package z - -import ( - "context" - "sync" - - "github.com/cespare/xxhash/v2" -) - -// TODO: Figure out a way to re-use memhash for the second uint64 hash, we -// already know that appending bytes isn't reliable for generating a -// second hash (see Ristretto PR #88). -// -// We also know that while the Go runtime has a runtime memhash128 -// function, it's not possible to use it to generate [2]uint64 or -// anything resembling a 128bit hash, even though that's exactly what -// we need in this situation. -func KeyToHash(key interface{}) (uint64, uint64) { - if key == nil { - return 0, 0 - } - switch k := key.(type) { - case uint64: - return k, 0 - case string: - return MemHashString(k), xxhash.Sum64String(k) - case []byte: - return MemHash(k), xxhash.Sum64(k) - case byte: - return uint64(k), 0 - case int: - return uint64(k), 0 - case int32: - return uint64(k), 0 - case uint32: - return uint64(k), 0 - case int64: - return uint64(k), 0 - default: - panic("Key type not supported") - } -} - -var ( - dummyCloserChan <-chan struct{} - tmpDir string -) - -// Closer holds the two things we need to close a goroutine and wait for it to -// finish: a chan to tell the goroutine to shut down, and a WaitGroup with -// which to wait for it to finish shutting down. -type Closer struct { - waiting sync.WaitGroup - - ctx context.Context - cancel context.CancelFunc -} - -// SetTmpDir sets the temporary directory for the temporary buffers. -func SetTmpDir(dir string) { - tmpDir = dir -} - -// NewCloser constructs a new Closer, with an initial count on the WaitGroup. -func NewCloser(initial int) *Closer { - ret := &Closer{} - ret.ctx, ret.cancel = context.WithCancel(context.Background()) - ret.waiting.Add(initial) - return ret -} - -// AddRunning Add()'s delta to the WaitGroup. -func (lc *Closer) AddRunning(delta int) { - lc.waiting.Add(delta) -} - -// Ctx can be used to get a context, which would automatically get cancelled when Signal is called. -func (lc *Closer) Ctx() context.Context { - if lc == nil { - return context.Background() - } - return lc.ctx -} - -// Signal signals the HasBeenClosed signal. -func (lc *Closer) Signal() { - // Todo(ibrahim): Change Signal to return error on next badger breaking change. - lc.cancel() -} - -// HasBeenClosed gets signaled when Signal() is called. -func (lc *Closer) HasBeenClosed() <-chan struct{} { - if lc == nil { - return dummyCloserChan - } - return lc.ctx.Done() -} - -// Done calls Done() on the WaitGroup. -func (lc *Closer) Done() { - if lc == nil { - return - } - lc.waiting.Done() -} - -// Wait waits on the WaitGroup. (It waits for NewCloser's initial value, AddRunning, and Done -// calls to balance out.) -func (lc *Closer) Wait() { - lc.waiting.Wait() -} - -// SignalAndWait calls Signal(), then Wait(). -func (lc *Closer) SignalAndWait() { - lc.Signal() - lc.Wait() -} - -// ZeroOut zeroes out all the bytes in the range [start, end). 
-func ZeroOut(dst []byte, start, end int) { - if start < 0 || start >= len(dst) { - return // BAD - } - if end >= len(dst) { - end = len(dst) - } - if end-start <= 0 { - return - } - Memclr(dst[start:end]) - // b := dst[start:end] - // for i := range b { - // b[i] = 0x0 - // } -} diff --git a/vendor/github.com/dgryski/go-rendezvous/LICENSE b/vendor/github.com/dgryski/go-rendezvous/LICENSE deleted file mode 100644 index 22080f7..0000000 --- a/vendor/github.com/dgryski/go-rendezvous/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2017-2020 Damian Gryski - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/github.com/dgryski/go-rendezvous/rdv.go b/vendor/github.com/dgryski/go-rendezvous/rdv.go deleted file mode 100644 index 7a6f820..0000000 --- a/vendor/github.com/dgryski/go-rendezvous/rdv.go +++ /dev/null @@ -1,79 +0,0 @@ -package rendezvous - -type Rendezvous struct { - nodes map[string]int - nstr []string - nhash []uint64 - hash Hasher -} - -type Hasher func(s string) uint64 - -func New(nodes []string, hash Hasher) *Rendezvous { - r := &Rendezvous{ - nodes: make(map[string]int, len(nodes)), - nstr: make([]string, len(nodes)), - nhash: make([]uint64, len(nodes)), - hash: hash, - } - - for i, n := range nodes { - r.nodes[n] = i - r.nstr[i] = n - r.nhash[i] = hash(n) - } - - return r -} - -func (r *Rendezvous) Lookup(k string) string { - // short-circuit if we're empty - if len(r.nodes) == 0 { - return "" - } - - khash := r.hash(k) - - var midx int - var mhash = xorshiftMult64(khash ^ r.nhash[0]) - - for i, nhash := range r.nhash[1:] { - if h := xorshiftMult64(khash ^ nhash); h > mhash { - midx = i + 1 - mhash = h - } - } - - return r.nstr[midx] -} - -func (r *Rendezvous) Add(node string) { - r.nodes[node] = len(r.nstr) - r.nstr = append(r.nstr, node) - r.nhash = append(r.nhash, r.hash(node)) -} - -func (r *Rendezvous) Remove(node string) { - // find index of node to remove - nidx := r.nodes[node] - - // remove from the slices - l := len(r.nstr) - r.nstr[nidx] = r.nstr[l] - r.nstr = r.nstr[:l] - - r.nhash[nidx] = r.nhash[l] - r.nhash = r.nhash[:l] - - // update the map - delete(r.nodes, node) - moved := r.nstr[nidx] - r.nodes[moved] = nidx -} - -func xorshiftMult64(x uint64) uint64 { - x ^= x >> 12 // a - x ^= x << 25 // b - x ^= x >> 27 // c - return x * 2685821657736338717 -} diff --git a/vendor/github.com/dustin/go-humanize/.travis.yml b/vendor/github.com/dustin/go-humanize/.travis.yml deleted file mode 100644 index ba95cdd..0000000 --- 
a/vendor/github.com/dustin/go-humanize/.travis.yml +++ /dev/null @@ -1,21 +0,0 @@ -sudo: false -language: go -go: - - 1.3.x - - 1.5.x - - 1.6.x - - 1.7.x - - 1.8.x - - 1.9.x - - master -matrix: - allow_failures: - - go: master - fast_finish: true -install: - - # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step). -script: - - go get -t -v ./... - - diff -u <(echo -n) <(gofmt -d -s .) - - go tool vet . - - go test -v -race ./... diff --git a/vendor/github.com/dustin/go-humanize/LICENSE b/vendor/github.com/dustin/go-humanize/LICENSE deleted file mode 100644 index 8d9a94a..0000000 --- a/vendor/github.com/dustin/go-humanize/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -Copyright (c) 2005-2008 Dustin Sallings - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - - diff --git a/vendor/github.com/dustin/go-humanize/README.markdown b/vendor/github.com/dustin/go-humanize/README.markdown deleted file mode 100644 index 91b4ae5..0000000 --- a/vendor/github.com/dustin/go-humanize/README.markdown +++ /dev/null @@ -1,124 +0,0 @@ -# Humane Units [![Build Status](https://travis-ci.org/dustin/go-humanize.svg?branch=master)](https://travis-ci.org/dustin/go-humanize) [![GoDoc](https://godoc.org/github.com/dustin/go-humanize?status.svg)](https://godoc.org/github.com/dustin/go-humanize) - -Just a few functions for helping humanize times and sizes. - -`go get` it as `github.com/dustin/go-humanize`, import it as -`"github.com/dustin/go-humanize"`, use it as `humanize`. - -See [godoc](https://godoc.org/github.com/dustin/go-humanize) for -complete documentation. - -## Sizes - -This lets you take numbers like `82854982` and convert them to useful -strings like, `83 MB` or `79 MiB` (whichever you prefer). - -Example: - -```go -fmt.Printf("That file is %s.", humanize.Bytes(82854982)) // That file is 83 MB. -``` - -## Times - -This lets you take a `time.Time` and spit it out in relative terms. -For example, `12 seconds ago` or `3 days from now`. - -Example: - -```go -fmt.Printf("This was touched %s.", humanize.Time(someTimeInstance)) // This was touched 7 hours ago. -``` - -Thanks to Kyle Lemons for the time implementation from an IRC -conversation one day. It's pretty neat. - -## Ordinals - -From a [mailing list discussion][odisc] where a user wanted to be able -to label ordinals. - - 0 -> 0th - 1 -> 1st - 2 -> 2nd - 3 -> 3rd - 4 -> 4th - [...] - -Example: - -```go -fmt.Printf("You're my %s best friend.", humanize.Ordinal(193)) // You are my 193rd best friend. 
-``` - -## Commas - -Want to shove commas into numbers? Be my guest. - - 0 -> 0 - 100 -> 100 - 1000 -> 1,000 - 1000000000 -> 1,000,000,000 - -100000 -> -100,000 - -Example: - -```go -fmt.Printf("You owe $%s.\n", humanize.Comma(6582491)) // You owe $6,582,491. -``` - -## Ftoa - -Nicer float64 formatter that removes trailing zeros. - -```go -fmt.Printf("%f", 2.24) // 2.240000 -fmt.Printf("%s", humanize.Ftoa(2.24)) // 2.24 -fmt.Printf("%f", 2.0) // 2.000000 -fmt.Printf("%s", humanize.Ftoa(2.0)) // 2 -``` - -## SI notation - -Format numbers with [SI notation][sinotation]. - -Example: - -```go -humanize.SI(0.00000000223, "M") // 2.23 nM -``` - -## English-specific functions - -The following functions are in the `humanize/english` subpackage. - -### Plurals - -Simple English pluralization - -```go -english.PluralWord(1, "object", "") // object -english.PluralWord(42, "object", "") // objects -english.PluralWord(2, "bus", "") // buses -english.PluralWord(99, "locus", "loci") // loci - -english.Plural(1, "object", "") // 1 object -english.Plural(42, "object", "") // 42 objects -english.Plural(2, "bus", "") // 2 buses -english.Plural(99, "locus", "loci") // 99 loci -``` - -### Word series - -Format comma-separated words lists with conjuctions: - -```go -english.WordSeries([]string{"foo"}, "and") // foo -english.WordSeries([]string{"foo", "bar"}, "and") // foo and bar -english.WordSeries([]string{"foo", "bar", "baz"}, "and") // foo, bar and baz - -english.OxfordWordSeries([]string{"foo", "bar", "baz"}, "and") // foo, bar, and baz -``` - -[odisc]: https://groups.google.com/d/topic/golang-nuts/l8NhI74jl-4/discussion -[sinotation]: http://en.wikipedia.org/wiki/Metric_prefix diff --git a/vendor/github.com/dustin/go-humanize/big.go b/vendor/github.com/dustin/go-humanize/big.go deleted file mode 100644 index f49dc33..0000000 --- a/vendor/github.com/dustin/go-humanize/big.go +++ /dev/null @@ -1,31 +0,0 @@ -package humanize - -import ( - "math/big" -) - -// order of magnitude (to a max order) -func oomm(n, b *big.Int, maxmag int) (float64, int) { - mag := 0 - m := &big.Int{} - for n.Cmp(b) >= 0 { - n.DivMod(n, b, m) - mag++ - if mag == maxmag && maxmag >= 0 { - break - } - } - return float64(n.Int64()) + (float64(m.Int64()) / float64(b.Int64())), mag -} - -// total order of magnitude -// (same as above, but with no upper limit) -func oom(n, b *big.Int) (float64, int) { - mag := 0 - m := &big.Int{} - for n.Cmp(b) >= 0 { - n.DivMod(n, b, m) - mag++ - } - return float64(n.Int64()) + (float64(m.Int64()) / float64(b.Int64())), mag -} diff --git a/vendor/github.com/dustin/go-humanize/bigbytes.go b/vendor/github.com/dustin/go-humanize/bigbytes.go deleted file mode 100644 index 1a2bf61..0000000 --- a/vendor/github.com/dustin/go-humanize/bigbytes.go +++ /dev/null @@ -1,173 +0,0 @@ -package humanize - -import ( - "fmt" - "math/big" - "strings" - "unicode" -) - -var ( - bigIECExp = big.NewInt(1024) - - // BigByte is one byte in bit.Ints - BigByte = big.NewInt(1) - // BigKiByte is 1,024 bytes in bit.Ints - BigKiByte = (&big.Int{}).Mul(BigByte, bigIECExp) - // BigMiByte is 1,024 k bytes in bit.Ints - BigMiByte = (&big.Int{}).Mul(BigKiByte, bigIECExp) - // BigGiByte is 1,024 m bytes in bit.Ints - BigGiByte = (&big.Int{}).Mul(BigMiByte, bigIECExp) - // BigTiByte is 1,024 g bytes in bit.Ints - BigTiByte = (&big.Int{}).Mul(BigGiByte, bigIECExp) - // BigPiByte is 1,024 t bytes in bit.Ints - BigPiByte = (&big.Int{}).Mul(BigTiByte, bigIECExp) - // BigEiByte is 1,024 p bytes in bit.Ints - BigEiByte = (&big.Int{}).Mul(BigPiByte, 
bigIECExp) - // BigZiByte is 1,024 e bytes in bit.Ints - BigZiByte = (&big.Int{}).Mul(BigEiByte, bigIECExp) - // BigYiByte is 1,024 z bytes in bit.Ints - BigYiByte = (&big.Int{}).Mul(BigZiByte, bigIECExp) -) - -var ( - bigSIExp = big.NewInt(1000) - - // BigSIByte is one SI byte in big.Ints - BigSIByte = big.NewInt(1) - // BigKByte is 1,000 SI bytes in big.Ints - BigKByte = (&big.Int{}).Mul(BigSIByte, bigSIExp) - // BigMByte is 1,000 SI k bytes in big.Ints - BigMByte = (&big.Int{}).Mul(BigKByte, bigSIExp) - // BigGByte is 1,000 SI m bytes in big.Ints - BigGByte = (&big.Int{}).Mul(BigMByte, bigSIExp) - // BigTByte is 1,000 SI g bytes in big.Ints - BigTByte = (&big.Int{}).Mul(BigGByte, bigSIExp) - // BigPByte is 1,000 SI t bytes in big.Ints - BigPByte = (&big.Int{}).Mul(BigTByte, bigSIExp) - // BigEByte is 1,000 SI p bytes in big.Ints - BigEByte = (&big.Int{}).Mul(BigPByte, bigSIExp) - // BigZByte is 1,000 SI e bytes in big.Ints - BigZByte = (&big.Int{}).Mul(BigEByte, bigSIExp) - // BigYByte is 1,000 SI z bytes in big.Ints - BigYByte = (&big.Int{}).Mul(BigZByte, bigSIExp) -) - -var bigBytesSizeTable = map[string]*big.Int{ - "b": BigByte, - "kib": BigKiByte, - "kb": BigKByte, - "mib": BigMiByte, - "mb": BigMByte, - "gib": BigGiByte, - "gb": BigGByte, - "tib": BigTiByte, - "tb": BigTByte, - "pib": BigPiByte, - "pb": BigPByte, - "eib": BigEiByte, - "eb": BigEByte, - "zib": BigZiByte, - "zb": BigZByte, - "yib": BigYiByte, - "yb": BigYByte, - // Without suffix - "": BigByte, - "ki": BigKiByte, - "k": BigKByte, - "mi": BigMiByte, - "m": BigMByte, - "gi": BigGiByte, - "g": BigGByte, - "ti": BigTiByte, - "t": BigTByte, - "pi": BigPiByte, - "p": BigPByte, - "ei": BigEiByte, - "e": BigEByte, - "z": BigZByte, - "zi": BigZiByte, - "y": BigYByte, - "yi": BigYiByte, -} - -var ten = big.NewInt(10) - -func humanateBigBytes(s, base *big.Int, sizes []string) string { - if s.Cmp(ten) < 0 { - return fmt.Sprintf("%d B", s) - } - c := (&big.Int{}).Set(s) - val, mag := oomm(c, base, len(sizes)-1) - suffix := sizes[mag] - f := "%.0f %s" - if val < 10 { - f = "%.1f %s" - } - - return fmt.Sprintf(f, val, suffix) - -} - -// BigBytes produces a human readable representation of an SI size. -// -// See also: ParseBigBytes. -// -// BigBytes(82854982) -> 83 MB -func BigBytes(s *big.Int) string { - sizes := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"} - return humanateBigBytes(s, bigSIExp, sizes) -} - -// BigIBytes produces a human readable representation of an IEC size. -// -// See also: ParseBigBytes. -// -// BigIBytes(82854982) -> 79 MiB -func BigIBytes(s *big.Int) string { - sizes := []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"} - return humanateBigBytes(s, bigIECExp, sizes) -} - -// ParseBigBytes parses a string representation of bytes into the number -// of bytes it represents. -// -// See also: BigBytes, BigIBytes. -// -// ParseBigBytes("42 MB") -> 42000000, nil -// ParseBigBytes("42 mib") -> 44040192, nil -func ParseBigBytes(s string) (*big.Int, error) { - lastDigit := 0 - hasComma := false - for _, r := range s { - if !(unicode.IsDigit(r) || r == '.' 
|| r == ',') { - break - } - if r == ',' { - hasComma = true - } - lastDigit++ - } - - num := s[:lastDigit] - if hasComma { - num = strings.Replace(num, ",", "", -1) - } - - val := &big.Rat{} - _, err := fmt.Sscanf(num, "%f", val) - if err != nil { - return nil, err - } - - extra := strings.ToLower(strings.TrimSpace(s[lastDigit:])) - if m, ok := bigBytesSizeTable[extra]; ok { - mv := (&big.Rat{}).SetInt(m) - val.Mul(val, mv) - rv := &big.Int{} - rv.Div(val.Num(), val.Denom()) - return rv, nil - } - - return nil, fmt.Errorf("unhandled size name: %v", extra) -} diff --git a/vendor/github.com/dustin/go-humanize/bytes.go b/vendor/github.com/dustin/go-humanize/bytes.go deleted file mode 100644 index 0b498f4..0000000 --- a/vendor/github.com/dustin/go-humanize/bytes.go +++ /dev/null @@ -1,143 +0,0 @@ -package humanize - -import ( - "fmt" - "math" - "strconv" - "strings" - "unicode" -) - -// IEC Sizes. -// kibis of bits -const ( - Byte = 1 << (iota * 10) - KiByte - MiByte - GiByte - TiByte - PiByte - EiByte -) - -// SI Sizes. -const ( - IByte = 1 - KByte = IByte * 1000 - MByte = KByte * 1000 - GByte = MByte * 1000 - TByte = GByte * 1000 - PByte = TByte * 1000 - EByte = PByte * 1000 -) - -var bytesSizeTable = map[string]uint64{ - "b": Byte, - "kib": KiByte, - "kb": KByte, - "mib": MiByte, - "mb": MByte, - "gib": GiByte, - "gb": GByte, - "tib": TiByte, - "tb": TByte, - "pib": PiByte, - "pb": PByte, - "eib": EiByte, - "eb": EByte, - // Without suffix - "": Byte, - "ki": KiByte, - "k": KByte, - "mi": MiByte, - "m": MByte, - "gi": GiByte, - "g": GByte, - "ti": TiByte, - "t": TByte, - "pi": PiByte, - "p": PByte, - "ei": EiByte, - "e": EByte, -} - -func logn(n, b float64) float64 { - return math.Log(n) / math.Log(b) -} - -func humanateBytes(s uint64, base float64, sizes []string) string { - if s < 10 { - return fmt.Sprintf("%d B", s) - } - e := math.Floor(logn(float64(s), base)) - suffix := sizes[int(e)] - val := math.Floor(float64(s)/math.Pow(base, e)*10+0.5) / 10 - f := "%.0f %s" - if val < 10 { - f = "%.1f %s" - } - - return fmt.Sprintf(f, val, suffix) -} - -// Bytes produces a human readable representation of an SI size. -// -// See also: ParseBytes. -// -// Bytes(82854982) -> 83 MB -func Bytes(s uint64) string { - sizes := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB"} - return humanateBytes(s, 1000, sizes) -} - -// IBytes produces a human readable representation of an IEC size. -// -// See also: ParseBytes. -// -// IBytes(82854982) -> 79 MiB -func IBytes(s uint64) string { - sizes := []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB"} - return humanateBytes(s, 1024, sizes) -} - -// ParseBytes parses a string representation of bytes into the number -// of bytes it represents. -// -// See Also: Bytes, IBytes. -// -// ParseBytes("42 MB") -> 42000000, nil -// ParseBytes("42 mib") -> 44040192, nil -func ParseBytes(s string) (uint64, error) { - lastDigit := 0 - hasComma := false - for _, r := range s { - if !(unicode.IsDigit(r) || r == '.' 
|| r == ',') { - break - } - if r == ',' { - hasComma = true - } - lastDigit++ - } - - num := s[:lastDigit] - if hasComma { - num = strings.Replace(num, ",", "", -1) - } - - f, err := strconv.ParseFloat(num, 64) - if err != nil { - return 0, err - } - - extra := strings.ToLower(strings.TrimSpace(s[lastDigit:])) - if m, ok := bytesSizeTable[extra]; ok { - f *= float64(m) - if f >= math.MaxUint64 { - return 0, fmt.Errorf("too large: %v", s) - } - return uint64(f), nil - } - - return 0, fmt.Errorf("unhandled size name: %v", extra) -} diff --git a/vendor/github.com/dustin/go-humanize/comma.go b/vendor/github.com/dustin/go-humanize/comma.go deleted file mode 100644 index 520ae3e..0000000 --- a/vendor/github.com/dustin/go-humanize/comma.go +++ /dev/null @@ -1,116 +0,0 @@ -package humanize - -import ( - "bytes" - "math" - "math/big" - "strconv" - "strings" -) - -// Comma produces a string form of the given number in base 10 with -// commas after every three orders of magnitude. -// -// e.g. Comma(834142) -> 834,142 -func Comma(v int64) string { - sign := "" - - // Min int64 can't be negated to a usable value, so it has to be special cased. - if v == math.MinInt64 { - return "-9,223,372,036,854,775,808" - } - - if v < 0 { - sign = "-" - v = 0 - v - } - - parts := []string{"", "", "", "", "", "", ""} - j := len(parts) - 1 - - for v > 999 { - parts[j] = strconv.FormatInt(v%1000, 10) - switch len(parts[j]) { - case 2: - parts[j] = "0" + parts[j] - case 1: - parts[j] = "00" + parts[j] - } - v = v / 1000 - j-- - } - parts[j] = strconv.Itoa(int(v)) - return sign + strings.Join(parts[j:], ",") -} - -// Commaf produces a string form of the given number in base 10 with -// commas after every three orders of magnitude. -// -// e.g. Commaf(834142.32) -> 834,142.32 -func Commaf(v float64) string { - buf := &bytes.Buffer{} - if v < 0 { - buf.Write([]byte{'-'}) - v = 0 - v - } - - comma := []byte{','} - - parts := strings.Split(strconv.FormatFloat(v, 'f', -1, 64), ".") - pos := 0 - if len(parts[0])%3 != 0 { - pos += len(parts[0]) % 3 - buf.WriteString(parts[0][:pos]) - buf.Write(comma) - } - for ; pos < len(parts[0]); pos += 3 { - buf.WriteString(parts[0][pos : pos+3]) - buf.Write(comma) - } - buf.Truncate(buf.Len() - 1) - - if len(parts) > 1 { - buf.Write([]byte{'.'}) - buf.WriteString(parts[1]) - } - return buf.String() -} - -// CommafWithDigits works like the Commaf but limits the resulting -// string to the given number of decimal places. -// -// e.g. CommafWithDigits(834142.32, 1) -> 834,142.3 -func CommafWithDigits(f float64, decimals int) string { - return stripTrailingDigits(Commaf(f), decimals) -} - -// BigComma produces a string form of the given big.Int in base 10 -// with commas after every three orders of magnitude. 
-func BigComma(b *big.Int) string { - sign := "" - if b.Sign() < 0 { - sign = "-" - b.Abs(b) - } - - athousand := big.NewInt(1000) - c := (&big.Int{}).Set(b) - _, m := oom(c, athousand) - parts := make([]string, m+1) - j := len(parts) - 1 - - mod := &big.Int{} - for b.Cmp(athousand) >= 0 { - b.DivMod(b, athousand, mod) - parts[j] = strconv.FormatInt(mod.Int64(), 10) - switch len(parts[j]) { - case 2: - parts[j] = "0" + parts[j] - case 1: - parts[j] = "00" + parts[j] - } - j-- - } - parts[j] = strconv.Itoa(int(b.Int64())) - return sign + strings.Join(parts[j:], ",") -} diff --git a/vendor/github.com/dustin/go-humanize/commaf.go b/vendor/github.com/dustin/go-humanize/commaf.go deleted file mode 100644 index 620690d..0000000 --- a/vendor/github.com/dustin/go-humanize/commaf.go +++ /dev/null @@ -1,40 +0,0 @@ -// +build go1.6 - -package humanize - -import ( - "bytes" - "math/big" - "strings" -) - -// BigCommaf produces a string form of the given big.Float in base 10 -// with commas after every three orders of magnitude. -func BigCommaf(v *big.Float) string { - buf := &bytes.Buffer{} - if v.Sign() < 0 { - buf.Write([]byte{'-'}) - v.Abs(v) - } - - comma := []byte{','} - - parts := strings.Split(v.Text('f', -1), ".") - pos := 0 - if len(parts[0])%3 != 0 { - pos += len(parts[0]) % 3 - buf.WriteString(parts[0][:pos]) - buf.Write(comma) - } - for ; pos < len(parts[0]); pos += 3 { - buf.WriteString(parts[0][pos : pos+3]) - buf.Write(comma) - } - buf.Truncate(buf.Len() - 1) - - if len(parts) > 1 { - buf.Write([]byte{'.'}) - buf.WriteString(parts[1]) - } - return buf.String() -} diff --git a/vendor/github.com/dustin/go-humanize/ftoa.go b/vendor/github.com/dustin/go-humanize/ftoa.go deleted file mode 100644 index 1c62b64..0000000 --- a/vendor/github.com/dustin/go-humanize/ftoa.go +++ /dev/null @@ -1,46 +0,0 @@ -package humanize - -import ( - "strconv" - "strings" -) - -func stripTrailingZeros(s string) string { - offset := len(s) - 1 - for offset > 0 { - if s[offset] == '.' { - offset-- - break - } - if s[offset] != '0' { - break - } - offset-- - } - return s[:offset+1] -} - -func stripTrailingDigits(s string, digits int) string { - if i := strings.Index(s, "."); i >= 0 { - if digits <= 0 { - return s[:i] - } - i++ - if i+digits >= len(s) { - return s - } - return s[:i+digits] - } - return s -} - -// Ftoa converts a float to a string with no trailing zeros. -func Ftoa(num float64) string { - return stripTrailingZeros(strconv.FormatFloat(num, 'f', 6, 64)) -} - -// FtoaWithDigits converts a float to a string but limits the resulting string -// to the given number of decimal places, and no trailing zeros. -func FtoaWithDigits(num float64, digits int) string { - return stripTrailingZeros(stripTrailingDigits(strconv.FormatFloat(num, 'f', 6, 64), digits)) -} diff --git a/vendor/github.com/dustin/go-humanize/humanize.go b/vendor/github.com/dustin/go-humanize/humanize.go deleted file mode 100644 index a2c2da3..0000000 --- a/vendor/github.com/dustin/go-humanize/humanize.go +++ /dev/null @@ -1,8 +0,0 @@ -/* -Package humanize converts boring ugly numbers to human-friendly strings and back. - -Durations can be turned into strings such as "3 days ago", numbers -representing sizes like 82854982 into useful strings like, "83 MB" or -"79 MiB" (whichever you prefer). 
-*/ -package humanize diff --git a/vendor/github.com/dustin/go-humanize/number.go b/vendor/github.com/dustin/go-humanize/number.go deleted file mode 100644 index dec6186..0000000 --- a/vendor/github.com/dustin/go-humanize/number.go +++ /dev/null @@ -1,192 +0,0 @@ -package humanize - -/* -Slightly adapted from the source to fit go-humanize. - -Author: https://github.com/gorhill -Source: https://gist.github.com/gorhill/5285193 - -*/ - -import ( - "math" - "strconv" -) - -var ( - renderFloatPrecisionMultipliers = [...]float64{ - 1, - 10, - 100, - 1000, - 10000, - 100000, - 1000000, - 10000000, - 100000000, - 1000000000, - } - - renderFloatPrecisionRounders = [...]float64{ - 0.5, - 0.05, - 0.005, - 0.0005, - 0.00005, - 0.000005, - 0.0000005, - 0.00000005, - 0.000000005, - 0.0000000005, - } -) - -// FormatFloat produces a formatted number as string based on the following user-specified criteria: -// * thousands separator -// * decimal separator -// * decimal precision -// -// Usage: s := RenderFloat(format, n) -// The format parameter tells how to render the number n. -// -// See examples: http://play.golang.org/p/LXc1Ddm1lJ -// -// Examples of format strings, given n = 12345.6789: -// "#,###.##" => "12,345.67" -// "#,###." => "12,345" -// "#,###" => "12345,678" -// "#\u202F###,##" => "12 345,68" -// "#.###,###### => 12.345,678900 -// "" (aka default format) => 12,345.67 -// -// The highest precision allowed is 9 digits after the decimal symbol. -// There is also a version for integer number, FormatInteger(), -// which is convenient for calls within template. -func FormatFloat(format string, n float64) string { - // Special cases: - // NaN = "NaN" - // +Inf = "+Infinity" - // -Inf = "-Infinity" - if math.IsNaN(n) { - return "NaN" - } - if n > math.MaxFloat64 { - return "Infinity" - } - if n < -math.MaxFloat64 { - return "-Infinity" - } - - // default format - precision := 2 - decimalStr := "." 
- thousandStr := "," - positiveStr := "" - negativeStr := "-" - - if len(format) > 0 { - format := []rune(format) - - // If there is an explicit format directive, - // then default values are these: - precision = 9 - thousandStr = "" - - // collect indices of meaningful formatting directives - formatIndx := []int{} - for i, char := range format { - if char != '#' && char != '0' { - formatIndx = append(formatIndx, i) - } - } - - if len(formatIndx) > 0 { - // Directive at index 0: - // Must be a '+' - // Raise an error if not the case - // index: 0123456789 - // +0.000,000 - // +000,000.0 - // +0000.00 - // +0000 - if formatIndx[0] == 0 { - if format[formatIndx[0]] != '+' { - panic("RenderFloat(): invalid positive sign directive") - } - positiveStr = "+" - formatIndx = formatIndx[1:] - } - - // Two directives: - // First is thousands separator - // Raise an error if not followed by 3-digit - // 0123456789 - // 0.000,000 - // 000,000.00 - if len(formatIndx) == 2 { - if (formatIndx[1] - formatIndx[0]) != 4 { - panic("RenderFloat(): thousands separator directive must be followed by 3 digit-specifiers") - } - thousandStr = string(format[formatIndx[0]]) - formatIndx = formatIndx[1:] - } - - // One directive: - // Directive is decimal separator - // The number of digit-specifier following the separator indicates wanted precision - // 0123456789 - // 0.00 - // 000,0000 - if len(formatIndx) == 1 { - decimalStr = string(format[formatIndx[0]]) - precision = len(format) - formatIndx[0] - 1 - } - } - } - - // generate sign part - var signStr string - if n >= 0.000000001 { - signStr = positiveStr - } else if n <= -0.000000001 { - signStr = negativeStr - n = -n - } else { - signStr = "" - n = 0.0 - } - - // split number into integer and fractional parts - intf, fracf := math.Modf(n + renderFloatPrecisionRounders[precision]) - - // generate integer part string - intStr := strconv.FormatInt(int64(intf), 10) - - // add thousand separator if required - if len(thousandStr) > 0 { - for i := len(intStr); i > 3; { - i -= 3 - intStr = intStr[:i] + thousandStr + intStr[i:] - } - } - - // no fractional part, we can leave now - if precision == 0 { - return signStr + intStr - } - - // generate fractional part - fracStr := strconv.Itoa(int(fracf * renderFloatPrecisionMultipliers[precision])) - // may need padding - if len(fracStr) < precision { - fracStr = "000000000000000"[:precision-len(fracStr)] + fracStr - } - - return signStr + intStr + decimalStr + fracStr -} - -// FormatInteger produces a formatted number as string. -// See FormatFloat. -func FormatInteger(format string, n int) string { - return FormatFloat(format, float64(n)) -} diff --git a/vendor/github.com/dustin/go-humanize/ordinals.go b/vendor/github.com/dustin/go-humanize/ordinals.go deleted file mode 100644 index 43d88a8..0000000 --- a/vendor/github.com/dustin/go-humanize/ordinals.go +++ /dev/null @@ -1,25 +0,0 @@ -package humanize - -import "strconv" - -// Ordinal gives you the input number in a rank/ordinal format. 
-// -// Ordinal(3) -> 3rd -func Ordinal(x int) string { - suffix := "th" - switch x % 10 { - case 1: - if x%100 != 11 { - suffix = "st" - } - case 2: - if x%100 != 12 { - suffix = "nd" - } - case 3: - if x%100 != 13 { - suffix = "rd" - } - } - return strconv.Itoa(x) + suffix -} diff --git a/vendor/github.com/dustin/go-humanize/si.go b/vendor/github.com/dustin/go-humanize/si.go deleted file mode 100644 index ae659e0..0000000 --- a/vendor/github.com/dustin/go-humanize/si.go +++ /dev/null @@ -1,123 +0,0 @@ -package humanize - -import ( - "errors" - "math" - "regexp" - "strconv" -) - -var siPrefixTable = map[float64]string{ - -24: "y", // yocto - -21: "z", // zepto - -18: "a", // atto - -15: "f", // femto - -12: "p", // pico - -9: "n", // nano - -6: "µ", // micro - -3: "m", // milli - 0: "", - 3: "k", // kilo - 6: "M", // mega - 9: "G", // giga - 12: "T", // tera - 15: "P", // peta - 18: "E", // exa - 21: "Z", // zetta - 24: "Y", // yotta -} - -var revSIPrefixTable = revfmap(siPrefixTable) - -// revfmap reverses the map and precomputes the power multiplier -func revfmap(in map[float64]string) map[string]float64 { - rv := map[string]float64{} - for k, v := range in { - rv[v] = math.Pow(10, k) - } - return rv -} - -var riParseRegex *regexp.Regexp - -func init() { - ri := `^([\-0-9.]+)\s?([` - for _, v := range siPrefixTable { - ri += v - } - ri += `]?)(.*)` - - riParseRegex = regexp.MustCompile(ri) -} - -// ComputeSI finds the most appropriate SI prefix for the given number -// and returns the prefix along with the value adjusted to be within -// that prefix. -// -// See also: SI, ParseSI. -// -// e.g. ComputeSI(2.2345e-12) -> (2.2345, "p") -func ComputeSI(input float64) (float64, string) { - if input == 0 { - return 0, "" - } - mag := math.Abs(input) - exponent := math.Floor(logn(mag, 10)) - exponent = math.Floor(exponent/3) * 3 - - value := mag / math.Pow(10, exponent) - - // Handle special case where value is exactly 1000.0 - // Should return 1 M instead of 1000 k - if value == 1000.0 { - exponent += 3 - value = mag / math.Pow(10, exponent) - } - - value = math.Copysign(value, input) - - prefix := siPrefixTable[exponent] - return value, prefix -} - -// SI returns a string with default formatting. -// -// SI uses Ftoa to format float value, removing trailing zeros. -// -// See also: ComputeSI, ParseSI. -// -// e.g. SI(1000000, "B") -> 1 MB -// e.g. SI(2.2345e-12, "F") -> 2.2345 pF -func SI(input float64, unit string) string { - value, prefix := ComputeSI(input) - return Ftoa(value) + " " + prefix + unit -} - -// SIWithDigits works like SI but limits the resulting string to the -// given number of decimal places. -// -// e.g. SIWithDigits(1000000, 0, "B") -> 1 MB -// e.g. SIWithDigits(2.2345e-12, 2, "F") -> 2.23 pF -func SIWithDigits(input float64, decimals int, unit string) string { - value, prefix := ComputeSI(input) - return FtoaWithDigits(value, decimals) + " " + prefix + unit -} - -var errInvalid = errors.New("invalid input") - -// ParseSI parses an SI string back into the number and unit. -// -// See also: SI, ComputeSI. -// -// e.g. 
ParseSI("2.2345 pF") -> (2.2345e-12, "F", nil) -func ParseSI(input string) (float64, string, error) { - found := riParseRegex.FindStringSubmatch(input) - if len(found) != 4 { - return 0, "", errInvalid - } - mag := revSIPrefixTable[found[2]] - unit := found[3] - - base, err := strconv.ParseFloat(found[1], 64) - return base * mag, unit, err -} diff --git a/vendor/github.com/dustin/go-humanize/times.go b/vendor/github.com/dustin/go-humanize/times.go deleted file mode 100644 index dd3fbf5..0000000 --- a/vendor/github.com/dustin/go-humanize/times.go +++ /dev/null @@ -1,117 +0,0 @@ -package humanize - -import ( - "fmt" - "math" - "sort" - "time" -) - -// Seconds-based time units -const ( - Day = 24 * time.Hour - Week = 7 * Day - Month = 30 * Day - Year = 12 * Month - LongTime = 37 * Year -) - -// Time formats a time into a relative string. -// -// Time(someT) -> "3 weeks ago" -func Time(then time.Time) string { - return RelTime(then, time.Now(), "ago", "from now") -} - -// A RelTimeMagnitude struct contains a relative time point at which -// the relative format of time will switch to a new format string. A -// slice of these in ascending order by their "D" field is passed to -// CustomRelTime to format durations. -// -// The Format field is a string that may contain a "%s" which will be -// replaced with the appropriate signed label (e.g. "ago" or "from -// now") and a "%d" that will be replaced by the quantity. -// -// The DivBy field is the amount of time the time difference must be -// divided by in order to display correctly. -// -// e.g. if D is 2*time.Minute and you want to display "%d minutes %s" -// DivBy should be time.Minute so whatever the duration is will be -// expressed in minutes. -type RelTimeMagnitude struct { - D time.Duration - Format string - DivBy time.Duration -} - -var defaultMagnitudes = []RelTimeMagnitude{ - {time.Second, "now", time.Second}, - {2 * time.Second, "1 second %s", 1}, - {time.Minute, "%d seconds %s", time.Second}, - {2 * time.Minute, "1 minute %s", 1}, - {time.Hour, "%d minutes %s", time.Minute}, - {2 * time.Hour, "1 hour %s", 1}, - {Day, "%d hours %s", time.Hour}, - {2 * Day, "1 day %s", 1}, - {Week, "%d days %s", Day}, - {2 * Week, "1 week %s", 1}, - {Month, "%d weeks %s", Week}, - {2 * Month, "1 month %s", 1}, - {Year, "%d months %s", Month}, - {18 * Month, "1 year %s", 1}, - {2 * Year, "2 years %s", 1}, - {LongTime, "%d years %s", Year}, - {math.MaxInt64, "a long while %s", 1}, -} - -// RelTime formats a time into a relative string. -// -// It takes two times and two labels. In addition to the generic time -// delta string (e.g. 5 minutes), the labels are used applied so that -// the label corresponding to the smaller time is applied. -// -// RelTime(timeInPast, timeInFuture, "earlier", "later") -> "3 weeks earlier" -func RelTime(a, b time.Time, albl, blbl string) string { - return CustomRelTime(a, b, albl, blbl, defaultMagnitudes) -} - -// CustomRelTime formats a time into a relative string. -// -// It takes two times two labels and a table of relative time formats. -// In addition to the generic time delta string (e.g. 5 minutes), the -// labels are used applied so that the label corresponding to the -// smaller time is applied. 
-func CustomRelTime(a, b time.Time, albl, blbl string, magnitudes []RelTimeMagnitude) string { - lbl := albl - diff := b.Sub(a) - - if a.After(b) { - lbl = blbl - diff = a.Sub(b) - } - - n := sort.Search(len(magnitudes), func(i int) bool { - return magnitudes[i].D > diff - }) - - if n >= len(magnitudes) { - n = len(magnitudes) - 1 - } - mag := magnitudes[n] - args := []interface{}{} - escaped := false - for _, ch := range mag.Format { - if escaped { - switch ch { - case 's': - args = append(args, lbl) - case 'd': - args = append(args, diff/mag.DivBy) - } - escaped = false - } else { - escaped = ch == '%' - } - } - return fmt.Sprintf(mag.Format, args...) -} diff --git a/vendor/github.com/go-redis/redis/v8/.gitignore b/vendor/github.com/go-redis/redis/v8/.gitignore deleted file mode 100644 index b975a7b..0000000 --- a/vendor/github.com/go-redis/redis/v8/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -*.rdb -testdata/*/ -.idea/ diff --git a/vendor/github.com/go-redis/redis/v8/.golangci.yml b/vendor/github.com/go-redis/redis/v8/.golangci.yml deleted file mode 100644 index de51455..0000000 --- a/vendor/github.com/go-redis/redis/v8/.golangci.yml +++ /dev/null @@ -1,4 +0,0 @@ -run: - concurrency: 8 - deadline: 5m - tests: false diff --git a/vendor/github.com/go-redis/redis/v8/.prettierrc.yml b/vendor/github.com/go-redis/redis/v8/.prettierrc.yml deleted file mode 100644 index 8b7f044..0000000 --- a/vendor/github.com/go-redis/redis/v8/.prettierrc.yml +++ /dev/null @@ -1,4 +0,0 @@ -semi: false -singleQuote: true -proseWrap: always -printWidth: 100 diff --git a/vendor/github.com/go-redis/redis/v8/CHANGELOG.md b/vendor/github.com/go-redis/redis/v8/CHANGELOG.md deleted file mode 100644 index 195e519..0000000 --- a/vendor/github.com/go-redis/redis/v8/CHANGELOG.md +++ /dev/null @@ -1,177 +0,0 @@ -## [8.11.5](https://github.com/go-redis/redis/compare/v8.11.4...v8.11.5) (2022-03-17) - - -### Bug Fixes - -* add missing Expire methods to Cmdable ([17e3b43](https://github.com/go-redis/redis/commit/17e3b43879d516437ada71cf9c0deac6a382ed9a)) -* add whitespace for avoid unlikely colisions ([7f7c181](https://github.com/go-redis/redis/commit/7f7c1817617cfec909efb13d14ad22ef05a6ad4c)) -* example/otel compile error ([#2028](https://github.com/go-redis/redis/issues/2028)) ([187c07c](https://github.com/go-redis/redis/commit/187c07c41bf68dc3ab280bc3a925e960bbef6475)) -* **extra/redisotel:** set span.kind attribute to client ([065b200](https://github.com/go-redis/redis/commit/065b200070b41e6e949710b4f9e01b50ccc60ab2)) -* format ([96f53a0](https://github.com/go-redis/redis/commit/96f53a0159a28affa94beec1543a62234e7f8b32)) -* invalid type assert in stringArg ([de6c131](https://github.com/go-redis/redis/commit/de6c131865b8263400c8491777b295035f2408e4)) -* rename Golang to Go ([#2030](https://github.com/go-redis/redis/issues/2030)) ([b82a2d9](https://github.com/go-redis/redis/commit/b82a2d9d4d2de7b7cbe8fcd4895be62dbcacacbc)) -* set timeout for WAIT command. 
Fixes [#1963](https://github.com/go-redis/redis/issues/1963) ([333fee1](https://github.com/go-redis/redis/commit/333fee1a8fd98a2fbff1ab187c1b03246a7eb01f)) -* update some argument counts in pre-allocs ([f6974eb](https://github.com/go-redis/redis/commit/f6974ebb5c40a8adf90d2cacab6dc297f4eba4c2)) - - -### Features - -* Add redis v7's NX, XX, GT, LT expire variants ([e19bbb2](https://github.com/go-redis/redis/commit/e19bbb26e2e395c6e077b48d80d79e99f729a8b8)) -* add support for acl sentinel auth in universal client ([ab0ccc4](https://github.com/go-redis/redis/commit/ab0ccc47413f9b2a6eabc852fed5005a3ee1af6e)) -* add support for COPY command ([#2016](https://github.com/go-redis/redis/issues/2016)) ([730afbc](https://github.com/go-redis/redis/commit/730afbcffb93760e8a36cc06cfe55ab102b693a7)) -* add support for passing extra attributes added to spans ([39faaa1](https://github.com/go-redis/redis/commit/39faaa171523834ba527c9789710c4fde87f5a2e)) -* add support for time.Duration write and scan ([2f1b74e](https://github.com/go-redis/redis/commit/2f1b74e20cdd7719b2aecf0768d3e3ae7c3e781b)) -* **redisotel:** ability to override TracerProvider ([#1998](https://github.com/go-redis/redis/issues/1998)) ([bf8d4aa](https://github.com/go-redis/redis/commit/bf8d4aa60c00366cda2e98c3ddddc8cf68507417)) -* set net.peer.name and net.peer.port in otel example ([69bf454](https://github.com/go-redis/redis/commit/69bf454f706204211cd34835f76b2e8192d3766d)) - - - -## [8.11.4](https://github.com/go-redis/redis/compare/v8.11.3...v8.11.4) (2021-10-04) - - -### Features - -* add acl auth support for sentinels ([f66582f](https://github.com/go-redis/redis/commit/f66582f44f3dc3a4705a5260f982043fde4aa634)) -* add Cmd.{String,Int,Float,Bool}Slice helpers and an example ([5d3d293](https://github.com/go-redis/redis/commit/5d3d293cc9c60b90871e2420602001463708ce24)) -* add SetVal method for each command ([168981d](https://github.com/go-redis/redis/commit/168981da2d84ee9e07d15d3e74d738c162e264c4)) - - - -## v8.11 - -- Remove OpenTelemetry metrics. -- Supports more redis commands and options. - -## v8.10 - -- Removed extra OpenTelemetry spans from go-redis core. Now go-redis instrumentation only adds a - single span with a Redis command (instead of 4 spans). There are multiple reasons behind this - decision: - - - Traces become smaller and less noisy. - - It may be costly to process those 3 extra spans for each query. - - go-redis no longer depends on OpenTelemetry. - - Eventually we hope to replace the information that we no longer collect with OpenTelemetry - Metrics. - -## v8.9 - -- Changed `PubSub.Channel` to only rely on `Ping` result. You can now use `WithChannelSize`, - `WithChannelHealthCheckInterval`, and `WithChannelSendTimeout` to override default settings. - -## v8.8 - -- To make updating easier, extra modules now have the same version as go-redis does. That means that - you need to update your imports: - -``` -github.com/go-redis/redis/extra/redisotel -> github.com/go-redis/redis/extra/redisotel/v8 -github.com/go-redis/redis/extra/rediscensus -> github.com/go-redis/redis/extra/rediscensus/v8 -``` - -## v8.5 - -- [knadh](https://github.com/knadh) contributed long-awaited ability to scan Redis Hash into a - struct: - -```go -err := rdb.HGetAll(ctx, "hash").Scan(&data) - -err := rdb.MGet(ctx, "key1", "key2").Scan(&data) -``` - -- Please check [redismock](https://github.com/go-redis/redismock) by - [monkey92t](https://github.com/monkey92t) if you are looking for mocking Redis Client. 
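The v8.5 entry above shows `rdb.HGetAll(ctx, "hash").Scan(&data)`; a minimal sketch of that usage might look like the following, assuming a hypothetical `Profile` struct, hash key, and localhost address, with fields mapped through `redis` struct tags.

```go
package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

// Profile is an illustrative struct; the scanner maps hash fields via `redis` tags.
type Profile struct {
	Name string `redis:"name"`
	Age  int    `redis:"age"`
}

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

	// Write a hash, then scan it back into the struct.
	if err := rdb.HSet(ctx, "profile:1", "name", "Alice", "age", 30).Err(); err != nil {
		panic(err)
	}

	var p Profile
	if err := rdb.HGetAll(ctx, "profile:1").Scan(&p); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", p)
}
```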
- -## v8 - -- All commands require `context.Context` as a first argument, e.g. `rdb.Ping(ctx)`. If you are not - using `context.Context` yet, the simplest option is to define global package variable - `var ctx = context.TODO()` and use it when `ctx` is required. - -- Full support for `context.Context` canceling. - -- Added `redis.NewFailoverClusterClient` that supports routing read-only commands to a slave node. - -- Added `redisext.OpenTemetryHook` that adds - [Redis OpenTelemetry instrumentation](https://redis.uptrace.dev/tracing/). - -- Redis slow log support. - -- Ring uses Rendezvous Hashing by default which provides better distribution. You need to move - existing keys to a new location or keys will be inaccessible / lost. To use old hashing scheme: - -```go -import "github.com/golang/groupcache/consistenthash" - -ring := redis.NewRing(&redis.RingOptions{ - NewConsistentHash: func() { - return consistenthash.New(100, crc32.ChecksumIEEE) - }, -}) -``` - -- `ClusterOptions.MaxRedirects` default value is changed from 8 to 3. -- `Options.MaxRetries` default value is changed from 0 to 3. - -- `Cluster.ForEachNode` is renamed to `ForEachShard` for consistency with `Ring`. - -## v7.3 - -- New option `Options.Username` which causes client to use `AuthACL`. Be aware if your connection - URL contains username. - -## v7.2 - -- Existing `HMSet` is renamed to `HSet` and old deprecated `HMSet` is restored for Redis 3 users. - -## v7.1 - -- Existing `Cmd.String` is renamed to `Cmd.Text`. New `Cmd.String` implements `fmt.Stringer` - interface. - -## v7 - -- _Important_. Tx.Pipeline now returns a non-transactional pipeline. Use Tx.TxPipeline for a - transactional pipeline. -- WrapProcess is replaced with more convenient AddHook that has access to context.Context. -- WithContext now can not be used to create a shallow copy of the client. -- New methods ProcessContext, DoContext, and ExecContext. -- Client respects Context.Deadline when setting net.Conn deadline. -- Client listens on Context.Done while waiting for a connection from the pool and returns an error - when context context is cancelled. -- Add PubSub.ChannelWithSubscriptions that sends `*Subscription` in addition to `*Message` to allow - detecting reconnections. -- `time.Time` is now marshalled in RFC3339 format. `rdb.Get("foo").Time()` helper is added to parse - the time. -- `SetLimiter` is removed and added `Options.Limiter` instead. -- `HMSet` is deprecated as of Redis v4. - -## v6.15 - -- Cluster and Ring pipelines process commands for each node in its own goroutine. - -## 6.14 - -- Added Options.MinIdleConns. -- Added Options.MaxConnAge. -- PoolStats.FreeConns is renamed to PoolStats.IdleConns. -- Add Client.Do to simplify creating custom commands. -- Add Cmd.String, Cmd.Int, Cmd.Int64, Cmd.Uint64, Cmd.Float64, and Cmd.Bool helpers. -- Lower memory usage. - -## v6.13 - -- Ring got new options called `HashReplicas` and `Hash`. It is recommended to set - `HashReplicas = 1000` for better keys distribution between shards. -- Cluster client was optimized to use much less memory when reloading cluster state. -- PubSub.ReceiveMessage is re-worked to not use ReceiveTimeout so it does not lose data when timeout - occurres. In most cases it is recommended to use PubSub.Channel instead. -- Dialer.KeepAlive is set to 5 minutes by default. - -## v6.12 - -- ClusterClient got new option called `ClusterSlots` which allows to build cluster of normal Redis - Servers that don't have cluster mode enabled. 
See - https://godoc.org/github.com/go-redis/redis#example-NewClusterClient--ManualSetup diff --git a/vendor/github.com/go-redis/redis/v8/LICENSE b/vendor/github.com/go-redis/redis/v8/LICENSE deleted file mode 100644 index 298bed9..0000000 --- a/vendor/github.com/go-redis/redis/v8/LICENSE +++ /dev/null @@ -1,25 +0,0 @@ -Copyright (c) 2013 The github.com/go-redis/redis Authors. -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/go-redis/redis/v8/Makefile b/vendor/github.com/go-redis/redis/v8/Makefile deleted file mode 100644 index a4cfe05..0000000 --- a/vendor/github.com/go-redis/redis/v8/Makefile +++ /dev/null @@ -1,35 +0,0 @@ -PACKAGE_DIRS := $(shell find . -mindepth 2 -type f -name 'go.mod' -exec dirname {} \; | sort) - -test: testdeps - go test ./... - go test ./... -short -race - go test ./... -run=NONE -bench=. -benchmem - env GOOS=linux GOARCH=386 go test ./... - go vet - -testdeps: testdata/redis/src/redis-server - -bench: testdeps - go test ./... -test.run=NONE -test.bench=. -test.benchmem - -.PHONY: all test testdeps bench - -testdata/redis: - mkdir -p $@ - wget -qO- https://download.redis.io/releases/redis-6.2.5.tar.gz | tar xvz --strip-components=1 -C $@ - -testdata/redis/src/redis-server: testdata/redis - cd $< && make all - -fmt: - gofmt -w -s ./ - goimports -w -local github.com/go-redis/redis ./ - -go_mod_tidy: - go get -u && go mod tidy - set -e; for dir in $(PACKAGE_DIRS); do \ - echo "go mod tidy in $${dir}"; \ - (cd "$${dir}" && \ - go get -u && \ - go mod tidy); \ - done diff --git a/vendor/github.com/go-redis/redis/v8/README.md b/vendor/github.com/go-redis/redis/v8/README.md deleted file mode 100644 index f3b6a01..0000000 --- a/vendor/github.com/go-redis/redis/v8/README.md +++ /dev/null @@ -1,175 +0,0 @@ -# Redis client for Go - -![build workflow](https://github.com/go-redis/redis/actions/workflows/build.yml/badge.svg) -[![PkgGoDev](https://pkg.go.dev/badge/github.com/go-redis/redis/v8)](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc) -[![Documentation](https://img.shields.io/badge/redis-documentation-informational)](https://redis.uptrace.dev/) - -go-redis is brought to you by :star: [**uptrace/uptrace**](https://github.com/uptrace/uptrace). 
-Uptrace is an open source and blazingly fast **distributed tracing** backend powered by -OpenTelemetry and ClickHouse. Give it a star as well! - -## Resources - -- [Discussions](https://github.com/go-redis/redis/discussions) -- [Documentation](https://redis.uptrace.dev) -- [Reference](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc) -- [Examples](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#pkg-examples) -- [RealWorld example app](https://github.com/uptrace/go-treemux-realworld-example-app) - -Other projects you may like: - -- [Bun](https://bun.uptrace.dev) - fast and simple SQL client for PostgreSQL, MySQL, and SQLite. -- [BunRouter](https://bunrouter.uptrace.dev/) - fast and flexible HTTP router for Go. - -## Ecosystem - -- [Redis Mock](https://github.com/go-redis/redismock) -- [Distributed Locks](https://github.com/bsm/redislock) -- [Redis Cache](https://github.com/go-redis/cache) -- [Rate limiting](https://github.com/go-redis/redis_rate) - -## Features - -- Redis 3 commands except QUIT, MONITOR, and SYNC. -- Automatic connection pooling with - [circuit breaker](https://en.wikipedia.org/wiki/Circuit_breaker_design_pattern) support. -- [Pub/Sub](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#PubSub). -- [Transactions](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-Client-TxPipeline). -- [Pipeline](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-Client.Pipeline) and - [TxPipeline](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-Client.TxPipeline). -- [Scripting](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#Script). -- [Timeouts](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#Options). -- [Redis Sentinel](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#NewFailoverClient). -- [Redis Cluster](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#NewClusterClient). -- [Cluster of Redis Servers](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-NewClusterClient-ManualSetup) - without using cluster mode and Redis Sentinel. -- [Ring](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#NewRing). -- [Instrumentation](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-package-Instrumentation). - -## Installation - -go-redis supports 2 last Go versions and requires a Go version with -[modules](https://github.com/golang/go/wiki/Modules) support. 
So make sure to initialize a Go -module: - -```shell -go mod init github.com/my/repo -``` - -And then install go-redis/v8 (note _v8_ in the import; omitting it is a popular mistake): - -```shell -go get github.com/go-redis/redis/v8 -``` - -## Quickstart - -```go -import ( - "context" - "github.com/go-redis/redis/v8" - "fmt" -) - -var ctx = context.Background() - -func ExampleClient() { - rdb := redis.NewClient(&redis.Options{ - Addr: "localhost:6379", - Password: "", // no password set - DB: 0, // use default DB - }) - - err := rdb.Set(ctx, "key", "value", 0).Err() - if err != nil { - panic(err) - } - - val, err := rdb.Get(ctx, "key").Result() - if err != nil { - panic(err) - } - fmt.Println("key", val) - - val2, err := rdb.Get(ctx, "key2").Result() - if err == redis.Nil { - fmt.Println("key2 does not exist") - } else if err != nil { - panic(err) - } else { - fmt.Println("key2", val2) - } - // Output: key value - // key2 does not exist -} -``` - -## Look and feel - -Some corner cases: - -```go -// SET key value EX 10 NX -set, err := rdb.SetNX(ctx, "key", "value", 10*time.Second).Result() - -// SET key value keepttl NX -set, err := rdb.SetNX(ctx, "key", "value", redis.KeepTTL).Result() - -// SORT list LIMIT 0 2 ASC -vals, err := rdb.Sort(ctx, "list", &redis.Sort{Offset: 0, Count: 2, Order: "ASC"}).Result() - -// ZRANGEBYSCORE zset -inf +inf WITHSCORES LIMIT 0 2 -vals, err := rdb.ZRangeByScoreWithScores(ctx, "zset", &redis.ZRangeBy{ - Min: "-inf", - Max: "+inf", - Offset: 0, - Count: 2, -}).Result() - -// ZINTERSTORE out 2 zset1 zset2 WEIGHTS 2 3 AGGREGATE SUM -vals, err := rdb.ZInterStore(ctx, "out", &redis.ZStore{ - Keys: []string{"zset1", "zset2"}, - Weights: []int64{2, 3} -}).Result() - -// EVAL "return {KEYS[1],ARGV[1]}" 1 "key" "hello" -vals, err := rdb.Eval(ctx, "return {KEYS[1],ARGV[1]}", []string{"key"}, "hello").Result() - -// custom command -res, err := rdb.Do(ctx, "set", "key", "value").Result() -``` - -## Run the test - -go-redis will start a redis-server and run the test cases. - -The paths of redis-server bin file and redis config file are defined in `main_test.go`: - -``` -var ( - redisServerBin, _ = filepath.Abs(filepath.Join("testdata", "redis", "src", "redis-server")) - redisServerConf, _ = filepath.Abs(filepath.Join("testdata", "redis", "redis.conf")) -) -``` - -For local testing, you can change the variables to refer to your local files, or create a soft link -to the corresponding folder for redis-server and copy the config file to `testdata/redis/`: - -``` -ln -s /usr/bin/redis-server ./go-redis/testdata/redis/src -cp ./go-redis/testdata/redis.conf ./go-redis/testdata/redis/ -``` - -Lastly, run: - -``` -go test -``` - -## Contributors - -Thanks to all the people who already contributed! - - - - diff --git a/vendor/github.com/go-redis/redis/v8/RELEASING.md b/vendor/github.com/go-redis/redis/v8/RELEASING.md deleted file mode 100644 index 1115db4..0000000 --- a/vendor/github.com/go-redis/redis/v8/RELEASING.md +++ /dev/null @@ -1,15 +0,0 @@ -# Releasing - -1. Run `release.sh` script which updates versions in go.mod files and pushes a new branch to GitHub: - -```shell -TAG=v1.0.0 ./scripts/release.sh -``` - -2. Open a pull request and wait for the build to finish. - -3. 
Merge the pull request and run `tag.sh` to create tags for packages: - -```shell -TAG=v1.0.0 ./scripts/tag.sh -``` diff --git a/vendor/github.com/go-redis/redis/v8/cluster.go b/vendor/github.com/go-redis/redis/v8/cluster.go deleted file mode 100644 index a54f2f3..0000000 --- a/vendor/github.com/go-redis/redis/v8/cluster.go +++ /dev/null @@ -1,1750 +0,0 @@ -package redis - -import ( - "context" - "crypto/tls" - "fmt" - "math" - "net" - "runtime" - "sort" - "sync" - "sync/atomic" - "time" - - "github.com/go-redis/redis/v8/internal" - "github.com/go-redis/redis/v8/internal/hashtag" - "github.com/go-redis/redis/v8/internal/pool" - "github.com/go-redis/redis/v8/internal/proto" - "github.com/go-redis/redis/v8/internal/rand" -) - -var errClusterNoNodes = fmt.Errorf("redis: cluster has no nodes") - -// ClusterOptions are used to configure a cluster client and should be -// passed to NewClusterClient. -type ClusterOptions struct { - // A seed list of host:port addresses of cluster nodes. - Addrs []string - - // NewClient creates a cluster node client with provided name and options. - NewClient func(opt *Options) *Client - - // The maximum number of retries before giving up. Command is retried - // on network errors and MOVED/ASK redirects. - // Default is 3 retries. - MaxRedirects int - - // Enables read-only commands on slave nodes. - ReadOnly bool - // Allows routing read-only commands to the closest master or slave node. - // It automatically enables ReadOnly. - RouteByLatency bool - // Allows routing read-only commands to the random master or slave node. - // It automatically enables ReadOnly. - RouteRandomly bool - - // Optional function that returns cluster slots information. - // It is useful to manually create cluster of standalone Redis servers - // and load-balance read/write operations between master and slaves. - // It can use service like ZooKeeper to maintain configuration information - // and Cluster.ReloadState to manually trigger state reloading. - ClusterSlots func(context.Context) ([]ClusterSlot, error) - - // Following options are copied from Options struct. - - Dialer func(ctx context.Context, network, addr string) (net.Conn, error) - - OnConnect func(ctx context.Context, cn *Conn) error - - Username string - Password string - - MaxRetries int - MinRetryBackoff time.Duration - MaxRetryBackoff time.Duration - - DialTimeout time.Duration - ReadTimeout time.Duration - WriteTimeout time.Duration - - // PoolFIFO uses FIFO mode for each node connection pool GET/PUT (default LIFO). - PoolFIFO bool - - // PoolSize applies per cluster node and not for the whole cluster. 
- PoolSize int - MinIdleConns int - MaxConnAge time.Duration - PoolTimeout time.Duration - IdleTimeout time.Duration - IdleCheckFrequency time.Duration - - TLSConfig *tls.Config -} - -func (opt *ClusterOptions) init() { - if opt.MaxRedirects == -1 { - opt.MaxRedirects = 0 - } else if opt.MaxRedirects == 0 { - opt.MaxRedirects = 3 - } - - if opt.RouteByLatency || opt.RouteRandomly { - opt.ReadOnly = true - } - - if opt.PoolSize == 0 { - opt.PoolSize = 5 * runtime.GOMAXPROCS(0) - } - - switch opt.ReadTimeout { - case -1: - opt.ReadTimeout = 0 - case 0: - opt.ReadTimeout = 3 * time.Second - } - switch opt.WriteTimeout { - case -1: - opt.WriteTimeout = 0 - case 0: - opt.WriteTimeout = opt.ReadTimeout - } - - if opt.MaxRetries == 0 { - opt.MaxRetries = -1 - } - switch opt.MinRetryBackoff { - case -1: - opt.MinRetryBackoff = 0 - case 0: - opt.MinRetryBackoff = 8 * time.Millisecond - } - switch opt.MaxRetryBackoff { - case -1: - opt.MaxRetryBackoff = 0 - case 0: - opt.MaxRetryBackoff = 512 * time.Millisecond - } - - if opt.NewClient == nil { - opt.NewClient = NewClient - } -} - -func (opt *ClusterOptions) clientOptions() *Options { - const disableIdleCheck = -1 - - return &Options{ - Dialer: opt.Dialer, - OnConnect: opt.OnConnect, - - Username: opt.Username, - Password: opt.Password, - - MaxRetries: opt.MaxRetries, - MinRetryBackoff: opt.MinRetryBackoff, - MaxRetryBackoff: opt.MaxRetryBackoff, - - DialTimeout: opt.DialTimeout, - ReadTimeout: opt.ReadTimeout, - WriteTimeout: opt.WriteTimeout, - - PoolFIFO: opt.PoolFIFO, - PoolSize: opt.PoolSize, - MinIdleConns: opt.MinIdleConns, - MaxConnAge: opt.MaxConnAge, - PoolTimeout: opt.PoolTimeout, - IdleTimeout: opt.IdleTimeout, - IdleCheckFrequency: disableIdleCheck, - - TLSConfig: opt.TLSConfig, - // If ClusterSlots is populated, then we probably have an artificial - // cluster whose nodes are not in clustering mode (otherwise there isn't - // much use for ClusterSlots config). This means we cannot execute the - // READONLY command against that node -- setting readOnly to false in such - // situations in the options below will prevent that from happening. 
- readOnly: opt.ReadOnly && opt.ClusterSlots == nil, - } -} - -//------------------------------------------------------------------------------ - -type clusterNode struct { - Client *Client - - latency uint32 // atomic - generation uint32 // atomic - failing uint32 // atomic -} - -func newClusterNode(clOpt *ClusterOptions, addr string) *clusterNode { - opt := clOpt.clientOptions() - opt.Addr = addr - node := clusterNode{ - Client: clOpt.NewClient(opt), - } - - node.latency = math.MaxUint32 - if clOpt.RouteByLatency { - go node.updateLatency() - } - - return &node -} - -func (n *clusterNode) String() string { - return n.Client.String() -} - -func (n *clusterNode) Close() error { - return n.Client.Close() -} - -func (n *clusterNode) updateLatency() { - const numProbe = 10 - var dur uint64 - - for i := 0; i < numProbe; i++ { - time.Sleep(time.Duration(10+rand.Intn(10)) * time.Millisecond) - - start := time.Now() - n.Client.Ping(context.TODO()) - dur += uint64(time.Since(start) / time.Microsecond) - } - - latency := float64(dur) / float64(numProbe) - atomic.StoreUint32(&n.latency, uint32(latency+0.5)) -} - -func (n *clusterNode) Latency() time.Duration { - latency := atomic.LoadUint32(&n.latency) - return time.Duration(latency) * time.Microsecond -} - -func (n *clusterNode) MarkAsFailing() { - atomic.StoreUint32(&n.failing, uint32(time.Now().Unix())) -} - -func (n *clusterNode) Failing() bool { - const timeout = 15 // 15 seconds - - failing := atomic.LoadUint32(&n.failing) - if failing == 0 { - return false - } - if time.Now().Unix()-int64(failing) < timeout { - return true - } - atomic.StoreUint32(&n.failing, 0) - return false -} - -func (n *clusterNode) Generation() uint32 { - return atomic.LoadUint32(&n.generation) -} - -func (n *clusterNode) SetGeneration(gen uint32) { - for { - v := atomic.LoadUint32(&n.generation) - if gen < v || atomic.CompareAndSwapUint32(&n.generation, v, gen) { - break - } - } -} - -//------------------------------------------------------------------------------ - -type clusterNodes struct { - opt *ClusterOptions - - mu sync.RWMutex - addrs []string - nodes map[string]*clusterNode - activeAddrs []string - closed bool - - _generation uint32 // atomic -} - -func newClusterNodes(opt *ClusterOptions) *clusterNodes { - return &clusterNodes{ - opt: opt, - - addrs: opt.Addrs, - nodes: make(map[string]*clusterNode), - } -} - -func (c *clusterNodes) Close() error { - c.mu.Lock() - defer c.mu.Unlock() - - if c.closed { - return nil - } - c.closed = true - - var firstErr error - for _, node := range c.nodes { - if err := node.Client.Close(); err != nil && firstErr == nil { - firstErr = err - } - } - - c.nodes = nil - c.activeAddrs = nil - - return firstErr -} - -func (c *clusterNodes) Addrs() ([]string, error) { - var addrs []string - - c.mu.RLock() - closed := c.closed //nolint:ifshort - if !closed { - if len(c.activeAddrs) > 0 { - addrs = c.activeAddrs - } else { - addrs = c.addrs - } - } - c.mu.RUnlock() - - if closed { - return nil, pool.ErrClosed - } - if len(addrs) == 0 { - return nil, errClusterNoNodes - } - return addrs, nil -} - -func (c *clusterNodes) NextGeneration() uint32 { - return atomic.AddUint32(&c._generation, 1) -} - -// GC removes unused nodes. 
-func (c *clusterNodes) GC(generation uint32) { - //nolint:prealloc - var collected []*clusterNode - - c.mu.Lock() - - c.activeAddrs = c.activeAddrs[:0] - for addr, node := range c.nodes { - if node.Generation() >= generation { - c.activeAddrs = append(c.activeAddrs, addr) - if c.opt.RouteByLatency { - go node.updateLatency() - } - continue - } - - delete(c.nodes, addr) - collected = append(collected, node) - } - - c.mu.Unlock() - - for _, node := range collected { - _ = node.Client.Close() - } -} - -func (c *clusterNodes) GetOrCreate(addr string) (*clusterNode, error) { - node, err := c.get(addr) - if err != nil { - return nil, err - } - if node != nil { - return node, nil - } - - c.mu.Lock() - defer c.mu.Unlock() - - if c.closed { - return nil, pool.ErrClosed - } - - node, ok := c.nodes[addr] - if ok { - return node, nil - } - - node = newClusterNode(c.opt, addr) - - c.addrs = appendIfNotExists(c.addrs, addr) - c.nodes[addr] = node - - return node, nil -} - -func (c *clusterNodes) get(addr string) (*clusterNode, error) { - var node *clusterNode - var err error - c.mu.RLock() - if c.closed { - err = pool.ErrClosed - } else { - node = c.nodes[addr] - } - c.mu.RUnlock() - return node, err -} - -func (c *clusterNodes) All() ([]*clusterNode, error) { - c.mu.RLock() - defer c.mu.RUnlock() - - if c.closed { - return nil, pool.ErrClosed - } - - cp := make([]*clusterNode, 0, len(c.nodes)) - for _, node := range c.nodes { - cp = append(cp, node) - } - return cp, nil -} - -func (c *clusterNodes) Random() (*clusterNode, error) { - addrs, err := c.Addrs() - if err != nil { - return nil, err - } - - n := rand.Intn(len(addrs)) - return c.GetOrCreate(addrs[n]) -} - -//------------------------------------------------------------------------------ - -type clusterSlot struct { - start, end int - nodes []*clusterNode -} - -type clusterSlotSlice []*clusterSlot - -func (p clusterSlotSlice) Len() int { - return len(p) -} - -func (p clusterSlotSlice) Less(i, j int) bool { - return p[i].start < p[j].start -} - -func (p clusterSlotSlice) Swap(i, j int) { - p[i], p[j] = p[j], p[i] -} - -type clusterState struct { - nodes *clusterNodes - Masters []*clusterNode - Slaves []*clusterNode - - slots []*clusterSlot - - generation uint32 - createdAt time.Time -} - -func newClusterState( - nodes *clusterNodes, slots []ClusterSlot, origin string, -) (*clusterState, error) { - c := clusterState{ - nodes: nodes, - - slots: make([]*clusterSlot, 0, len(slots)), - - generation: nodes.NextGeneration(), - createdAt: time.Now(), - } - - originHost, _, _ := net.SplitHostPort(origin) - isLoopbackOrigin := isLoopback(originHost) - - for _, slot := range slots { - var nodes []*clusterNode - for i, slotNode := range slot.Nodes { - addr := slotNode.Addr - if !isLoopbackOrigin { - addr = replaceLoopbackHost(addr, originHost) - } - - node, err := c.nodes.GetOrCreate(addr) - if err != nil { - return nil, err - } - - node.SetGeneration(c.generation) - nodes = append(nodes, node) - - if i == 0 { - c.Masters = appendUniqueNode(c.Masters, node) - } else { - c.Slaves = appendUniqueNode(c.Slaves, node) - } - } - - c.slots = append(c.slots, &clusterSlot{ - start: slot.Start, - end: slot.End, - nodes: nodes, - }) - } - - sort.Sort(clusterSlotSlice(c.slots)) - - time.AfterFunc(time.Minute, func() { - nodes.GC(c.generation) - }) - - return &c, nil -} - -func replaceLoopbackHost(nodeAddr, originHost string) string { - nodeHost, nodePort, err := net.SplitHostPort(nodeAddr) - if err != nil { - return nodeAddr - } - - nodeIP := net.ParseIP(nodeHost) - if 
nodeIP == nil { - return nodeAddr - } - - if !nodeIP.IsLoopback() { - return nodeAddr - } - - // Use origin host which is not loopback and node port. - return net.JoinHostPort(originHost, nodePort) -} - -func isLoopback(host string) bool { - ip := net.ParseIP(host) - if ip == nil { - return true - } - return ip.IsLoopback() -} - -func (c *clusterState) slotMasterNode(slot int) (*clusterNode, error) { - nodes := c.slotNodes(slot) - if len(nodes) > 0 { - return nodes[0], nil - } - return c.nodes.Random() -} - -func (c *clusterState) slotSlaveNode(slot int) (*clusterNode, error) { - nodes := c.slotNodes(slot) - switch len(nodes) { - case 0: - return c.nodes.Random() - case 1: - return nodes[0], nil - case 2: - if slave := nodes[1]; !slave.Failing() { - return slave, nil - } - return nodes[0], nil - default: - var slave *clusterNode - for i := 0; i < 10; i++ { - n := rand.Intn(len(nodes)-1) + 1 - slave = nodes[n] - if !slave.Failing() { - return slave, nil - } - } - - // All slaves are loading - use master. - return nodes[0], nil - } -} - -func (c *clusterState) slotClosestNode(slot int) (*clusterNode, error) { - nodes := c.slotNodes(slot) - if len(nodes) == 0 { - return c.nodes.Random() - } - - var node *clusterNode - for _, n := range nodes { - if n.Failing() { - continue - } - if node == nil || n.Latency() < node.Latency() { - node = n - } - } - if node != nil { - return node, nil - } - - // If all nodes are failing - return random node - return c.nodes.Random() -} - -func (c *clusterState) slotRandomNode(slot int) (*clusterNode, error) { - nodes := c.slotNodes(slot) - if len(nodes) == 0 { - return c.nodes.Random() - } - if len(nodes) == 1 { - return nodes[0], nil - } - randomNodes := rand.Perm(len(nodes)) - for _, idx := range randomNodes { - if node := nodes[idx]; !node.Failing() { - return node, nil - } - } - return nodes[randomNodes[0]], nil -} - -func (c *clusterState) slotNodes(slot int) []*clusterNode { - i := sort.Search(len(c.slots), func(i int) bool { - return c.slots[i].end >= slot - }) - if i >= len(c.slots) { - return nil - } - x := c.slots[i] - if slot >= x.start && slot <= x.end { - return x.nodes - } - return nil -} - -//------------------------------------------------------------------------------ - -type clusterStateHolder struct { - load func(ctx context.Context) (*clusterState, error) - - state atomic.Value - reloading uint32 // atomic -} - -func newClusterStateHolder(fn func(ctx context.Context) (*clusterState, error)) *clusterStateHolder { - return &clusterStateHolder{ - load: fn, - } -} - -func (c *clusterStateHolder) Reload(ctx context.Context) (*clusterState, error) { - state, err := c.load(ctx) - if err != nil { - return nil, err - } - c.state.Store(state) - return state, nil -} - -func (c *clusterStateHolder) LazyReload() { - if !atomic.CompareAndSwapUint32(&c.reloading, 0, 1) { - return - } - go func() { - defer atomic.StoreUint32(&c.reloading, 0) - - _, err := c.Reload(context.Background()) - if err != nil { - return - } - time.Sleep(200 * time.Millisecond) - }() -} - -func (c *clusterStateHolder) Get(ctx context.Context) (*clusterState, error) { - v := c.state.Load() - if v == nil { - return c.Reload(ctx) - } - - state := v.(*clusterState) - if time.Since(state.createdAt) > 10*time.Second { - c.LazyReload() - } - return state, nil -} - -func (c *clusterStateHolder) ReloadOrGet(ctx context.Context) (*clusterState, error) { - state, err := c.Reload(ctx) - if err == nil { - return state, nil - } - return c.Get(ctx) -} - 
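As a hedged illustration of how the cluster state machinery above is exercised from the public API, the sketch below constructs the `ClusterClient` declared just after this point and triggers a lazy state reload via `ReloadState`; the seed node addresses are placeholder assumptions.

```go
package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()

	rdb := redis.NewClusterClient(&redis.ClusterOptions{
		Addrs:          []string{"127.0.0.1:7000", "127.0.0.1:7001", "127.0.0.1:7002"},
		RouteByLatency: true, // also enables ReadOnly, so read-only commands may go to replicas
	})
	defer rdb.Close()

	if err := rdb.Ping(ctx).Err(); err != nil {
		panic(err)
	}

	// Ask the client to lazily refresh its cached view of the cluster slots.
	rdb.ReloadState(ctx)

	fmt.Println("connected using", len(rdb.Options().Addrs), "seed addresses")
}
```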
-//------------------------------------------------------------------------------ - -type clusterClient struct { - opt *ClusterOptions - nodes *clusterNodes - state *clusterStateHolder //nolint:structcheck - cmdsInfoCache *cmdsInfoCache //nolint:structcheck -} - -// ClusterClient is a Redis Cluster client representing a pool of zero -// or more underlying connections. It's safe for concurrent use by -// multiple goroutines. -type ClusterClient struct { - *clusterClient - cmdable - hooks - ctx context.Context -} - -// NewClusterClient returns a Redis Cluster client as described in -// http://redis.io/topics/cluster-spec. -func NewClusterClient(opt *ClusterOptions) *ClusterClient { - opt.init() - - c := &ClusterClient{ - clusterClient: &clusterClient{ - opt: opt, - nodes: newClusterNodes(opt), - }, - ctx: context.Background(), - } - c.state = newClusterStateHolder(c.loadState) - c.cmdsInfoCache = newCmdsInfoCache(c.cmdsInfo) - c.cmdable = c.Process - - if opt.IdleCheckFrequency > 0 { - go c.reaper(opt.IdleCheckFrequency) - } - - return c -} - -func (c *ClusterClient) Context() context.Context { - return c.ctx -} - -func (c *ClusterClient) WithContext(ctx context.Context) *ClusterClient { - if ctx == nil { - panic("nil context") - } - clone := *c - clone.cmdable = clone.Process - clone.hooks.lock() - clone.ctx = ctx - return &clone -} - -// Options returns read-only Options that were used to create the client. -func (c *ClusterClient) Options() *ClusterOptions { - return c.opt -} - -// ReloadState reloads cluster state. If available it calls ClusterSlots func -// to get cluster slots information. -func (c *ClusterClient) ReloadState(ctx context.Context) { - c.state.LazyReload() -} - -// Close closes the cluster client, releasing any open resources. -// -// It is rare to Close a ClusterClient, as the ClusterClient is meant -// to be long-lived and shared between many goroutines. -func (c *ClusterClient) Close() error { - return c.nodes.Close() -} - -// Do creates a Cmd from the args and processes the cmd. -func (c *ClusterClient) Do(ctx context.Context, args ...interface{}) *Cmd { - cmd := NewCmd(ctx, args...) - _ = c.Process(ctx, cmd) - return cmd -} - -func (c *ClusterClient) Process(ctx context.Context, cmd Cmder) error { - return c.hooks.process(ctx, cmd, c.process) -} - -func (c *ClusterClient) process(ctx context.Context, cmd Cmder) error { - cmdInfo := c.cmdInfo(cmd.Name()) - slot := c.cmdSlot(cmd) - - var node *clusterNode - var ask bool - var lastErr error - for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ { - if attempt > 0 { - if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil { - return err - } - } - - if node == nil { - var err error - node, err = c.cmdNode(ctx, cmdInfo, slot) - if err != nil { - return err - } - } - - if ask { - pipe := node.Client.Pipeline() - _ = pipe.Process(ctx, NewCmd(ctx, "asking")) - _ = pipe.Process(ctx, cmd) - _, lastErr = pipe.Exec(ctx) - _ = pipe.Close() - ask = false - } else { - lastErr = node.Client.Process(ctx, cmd) - } - - // If there is no error - we are done. - if lastErr == nil { - return nil - } - if isReadOnly := isReadOnlyError(lastErr); isReadOnly || lastErr == pool.ErrClosed { - if isReadOnly { - c.state.LazyReload() - } - node = nil - continue - } - - // If slave is loading - pick another node. 
- if c.opt.ReadOnly && isLoadingError(lastErr) { - node.MarkAsFailing() - node = nil - continue - } - - var moved bool - var addr string - moved, ask, addr = isMovedError(lastErr) - if moved || ask { - c.state.LazyReload() - - var err error - node, err = c.nodes.GetOrCreate(addr) - if err != nil { - return err - } - continue - } - - if shouldRetry(lastErr, cmd.readTimeout() == nil) { - // First retry the same node. - if attempt == 0 { - continue - } - - // Second try another node. - node.MarkAsFailing() - node = nil - continue - } - - return lastErr - } - return lastErr -} - -// ForEachMaster concurrently calls the fn on each master node in the cluster. -// It returns the first error if any. -func (c *ClusterClient) ForEachMaster( - ctx context.Context, - fn func(ctx context.Context, client *Client) error, -) error { - state, err := c.state.ReloadOrGet(ctx) - if err != nil { - return err - } - - var wg sync.WaitGroup - errCh := make(chan error, 1) - - for _, master := range state.Masters { - wg.Add(1) - go func(node *clusterNode) { - defer wg.Done() - err := fn(ctx, node.Client) - if err != nil { - select { - case errCh <- err: - default: - } - } - }(master) - } - - wg.Wait() - - select { - case err := <-errCh: - return err - default: - return nil - } -} - -// ForEachSlave concurrently calls the fn on each slave node in the cluster. -// It returns the first error if any. -func (c *ClusterClient) ForEachSlave( - ctx context.Context, - fn func(ctx context.Context, client *Client) error, -) error { - state, err := c.state.ReloadOrGet(ctx) - if err != nil { - return err - } - - var wg sync.WaitGroup - errCh := make(chan error, 1) - - for _, slave := range state.Slaves { - wg.Add(1) - go func(node *clusterNode) { - defer wg.Done() - err := fn(ctx, node.Client) - if err != nil { - select { - case errCh <- err: - default: - } - } - }(slave) - } - - wg.Wait() - - select { - case err := <-errCh: - return err - default: - return nil - } -} - -// ForEachShard concurrently calls the fn on each known node in the cluster. -// It returns the first error if any. -func (c *ClusterClient) ForEachShard( - ctx context.Context, - fn func(ctx context.Context, client *Client) error, -) error { - state, err := c.state.ReloadOrGet(ctx) - if err != nil { - return err - } - - var wg sync.WaitGroup - errCh := make(chan error, 1) - - worker := func(node *clusterNode) { - defer wg.Done() - err := fn(ctx, node.Client) - if err != nil { - select { - case errCh <- err: - default: - } - } - } - - for _, node := range state.Masters { - wg.Add(1) - go worker(node) - } - for _, node := range state.Slaves { - wg.Add(1) - go worker(node) - } - - wg.Wait() - - select { - case err := <-errCh: - return err - default: - return nil - } -} - -// PoolStats returns accumulated connection pool stats. 
-func (c *ClusterClient) PoolStats() *PoolStats { - var acc PoolStats - - state, _ := c.state.Get(context.TODO()) - if state == nil { - return &acc - } - - for _, node := range state.Masters { - s := node.Client.connPool.Stats() - acc.Hits += s.Hits - acc.Misses += s.Misses - acc.Timeouts += s.Timeouts - - acc.TotalConns += s.TotalConns - acc.IdleConns += s.IdleConns - acc.StaleConns += s.StaleConns - } - - for _, node := range state.Slaves { - s := node.Client.connPool.Stats() - acc.Hits += s.Hits - acc.Misses += s.Misses - acc.Timeouts += s.Timeouts - - acc.TotalConns += s.TotalConns - acc.IdleConns += s.IdleConns - acc.StaleConns += s.StaleConns - } - - return &acc -} - -func (c *ClusterClient) loadState(ctx context.Context) (*clusterState, error) { - if c.opt.ClusterSlots != nil { - slots, err := c.opt.ClusterSlots(ctx) - if err != nil { - return nil, err - } - return newClusterState(c.nodes, slots, "") - } - - addrs, err := c.nodes.Addrs() - if err != nil { - return nil, err - } - - var firstErr error - - for _, idx := range rand.Perm(len(addrs)) { - addr := addrs[idx] - - node, err := c.nodes.GetOrCreate(addr) - if err != nil { - if firstErr == nil { - firstErr = err - } - continue - } - - slots, err := node.Client.ClusterSlots(ctx).Result() - if err != nil { - if firstErr == nil { - firstErr = err - } - continue - } - - return newClusterState(c.nodes, slots, node.Client.opt.Addr) - } - - /* - * No node is connectable. It's possible that all nodes' IP has changed. - * Clear activeAddrs to let client be able to re-connect using the initial - * setting of the addresses (e.g. [redis-cluster-0:6379, redis-cluster-1:6379]), - * which might have chance to resolve domain name and get updated IP address. - */ - c.nodes.mu.Lock() - c.nodes.activeAddrs = nil - c.nodes.mu.Unlock() - - return nil, firstErr -} - -// reaper closes idle connections to the cluster. 
-func (c *ClusterClient) reaper(idleCheckFrequency time.Duration) { - ticker := time.NewTicker(idleCheckFrequency) - defer ticker.Stop() - - for range ticker.C { - nodes, err := c.nodes.All() - if err != nil { - break - } - - for _, node := range nodes { - _, err := node.Client.connPool.(*pool.ConnPool).ReapStaleConns() - if err != nil { - internal.Logger.Printf(c.Context(), "ReapStaleConns failed: %s", err) - } - } - } -} - -func (c *ClusterClient) Pipeline() Pipeliner { - pipe := Pipeline{ - ctx: c.ctx, - exec: c.processPipeline, - } - pipe.init() - return &pipe -} - -func (c *ClusterClient) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) { - return c.Pipeline().Pipelined(ctx, fn) -} - -func (c *ClusterClient) processPipeline(ctx context.Context, cmds []Cmder) error { - return c.hooks.processPipeline(ctx, cmds, c._processPipeline) -} - -func (c *ClusterClient) _processPipeline(ctx context.Context, cmds []Cmder) error { - cmdsMap := newCmdsMap() - err := c.mapCmdsByNode(ctx, cmdsMap, cmds) - if err != nil { - setCmdsErr(cmds, err) - return err - } - - for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ { - if attempt > 0 { - if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil { - setCmdsErr(cmds, err) - return err - } - } - - failedCmds := newCmdsMap() - var wg sync.WaitGroup - - for node, cmds := range cmdsMap.m { - wg.Add(1) - go func(node *clusterNode, cmds []Cmder) { - defer wg.Done() - - err := c._processPipelineNode(ctx, node, cmds, failedCmds) - if err == nil { - return - } - if attempt < c.opt.MaxRedirects { - if err := c.mapCmdsByNode(ctx, failedCmds, cmds); err != nil { - setCmdsErr(cmds, err) - } - } else { - setCmdsErr(cmds, err) - } - }(node, cmds) - } - - wg.Wait() - if len(failedCmds.m) == 0 { - break - } - cmdsMap = failedCmds - } - - return cmdsFirstErr(cmds) -} - -func (c *ClusterClient) mapCmdsByNode(ctx context.Context, cmdsMap *cmdsMap, cmds []Cmder) error { - state, err := c.state.Get(ctx) - if err != nil { - return err - } - - if c.opt.ReadOnly && c.cmdsAreReadOnly(cmds) { - for _, cmd := range cmds { - slot := c.cmdSlot(cmd) - node, err := c.slotReadOnlyNode(state, slot) - if err != nil { - return err - } - cmdsMap.Add(node, cmd) - } - return nil - } - - for _, cmd := range cmds { - slot := c.cmdSlot(cmd) - node, err := state.slotMasterNode(slot) - if err != nil { - return err - } - cmdsMap.Add(node, cmd) - } - return nil -} - -func (c *ClusterClient) cmdsAreReadOnly(cmds []Cmder) bool { - for _, cmd := range cmds { - cmdInfo := c.cmdInfo(cmd.Name()) - if cmdInfo == nil || !cmdInfo.ReadOnly { - return false - } - } - return true -} - -func (c *ClusterClient) _processPipelineNode( - ctx context.Context, node *clusterNode, cmds []Cmder, failedCmds *cmdsMap, -) error { - return node.Client.hooks.processPipeline(ctx, cmds, func(ctx context.Context, cmds []Cmder) error { - return node.Client.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error { - err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error { - return writeCmds(wr, cmds) - }) - if err != nil { - return err - } - - return cn.WithReader(ctx, c.opt.ReadTimeout, func(rd *proto.Reader) error { - return c.pipelineReadCmds(ctx, node, rd, cmds, failedCmds) - }) - }) - }) -} - -func (c *ClusterClient) pipelineReadCmds( - ctx context.Context, - node *clusterNode, - rd *proto.Reader, - cmds []Cmder, - failedCmds *cmdsMap, -) error { - for _, cmd := range cmds { - err := cmd.readReply(rd) - cmd.SetErr(err) - - if err == nil { - continue - } - - 
if c.checkMovedErr(ctx, cmd, err, failedCmds) { - continue - } - - if c.opt.ReadOnly && isLoadingError(err) { - node.MarkAsFailing() - return err - } - if isRedisError(err) { - continue - } - return err - } - return nil -} - -func (c *ClusterClient) checkMovedErr( - ctx context.Context, cmd Cmder, err error, failedCmds *cmdsMap, -) bool { - moved, ask, addr := isMovedError(err) - if !moved && !ask { - return false - } - - node, err := c.nodes.GetOrCreate(addr) - if err != nil { - return false - } - - if moved { - c.state.LazyReload() - failedCmds.Add(node, cmd) - return true - } - - if ask { - failedCmds.Add(node, NewCmd(ctx, "asking"), cmd) - return true - } - - panic("not reached") -} - -// TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC. -func (c *ClusterClient) TxPipeline() Pipeliner { - pipe := Pipeline{ - ctx: c.ctx, - exec: c.processTxPipeline, - } - pipe.init() - return &pipe -} - -func (c *ClusterClient) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) { - return c.TxPipeline().Pipelined(ctx, fn) -} - -func (c *ClusterClient) processTxPipeline(ctx context.Context, cmds []Cmder) error { - return c.hooks.processTxPipeline(ctx, cmds, c._processTxPipeline) -} - -func (c *ClusterClient) _processTxPipeline(ctx context.Context, cmds []Cmder) error { - // Trim multi .. exec. - cmds = cmds[1 : len(cmds)-1] - - state, err := c.state.Get(ctx) - if err != nil { - setCmdsErr(cmds, err) - return err - } - - cmdsMap := c.mapCmdsBySlot(cmds) - for slot, cmds := range cmdsMap { - node, err := state.slotMasterNode(slot) - if err != nil { - setCmdsErr(cmds, err) - continue - } - - cmdsMap := map[*clusterNode][]Cmder{node: cmds} - for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ { - if attempt > 0 { - if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil { - setCmdsErr(cmds, err) - return err - } - } - - failedCmds := newCmdsMap() - var wg sync.WaitGroup - - for node, cmds := range cmdsMap { - wg.Add(1) - go func(node *clusterNode, cmds []Cmder) { - defer wg.Done() - - err := c._processTxPipelineNode(ctx, node, cmds, failedCmds) - if err == nil { - return - } - - if attempt < c.opt.MaxRedirects { - if err := c.mapCmdsByNode(ctx, failedCmds, cmds); err != nil { - setCmdsErr(cmds, err) - } - } else { - setCmdsErr(cmds, err) - } - }(node, cmds) - } - - wg.Wait() - if len(failedCmds.m) == 0 { - break - } - cmdsMap = failedCmds.m - } - } - - return cmdsFirstErr(cmds) -} - -func (c *ClusterClient) mapCmdsBySlot(cmds []Cmder) map[int][]Cmder { - cmdsMap := make(map[int][]Cmder) - for _, cmd := range cmds { - slot := c.cmdSlot(cmd) - cmdsMap[slot] = append(cmdsMap[slot], cmd) - } - return cmdsMap -} - -func (c *ClusterClient) _processTxPipelineNode( - ctx context.Context, node *clusterNode, cmds []Cmder, failedCmds *cmdsMap, -) error { - return node.Client.hooks.processTxPipeline(ctx, cmds, func(ctx context.Context, cmds []Cmder) error { - return node.Client.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error { - err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error { - return writeCmds(wr, cmds) - }) - if err != nil { - return err - } - - return cn.WithReader(ctx, c.opt.ReadTimeout, func(rd *proto.Reader) error { - statusCmd := cmds[0].(*StatusCmd) - // Trim multi and exec. 
- cmds = cmds[1 : len(cmds)-1] - - err := c.txPipelineReadQueued(ctx, rd, statusCmd, cmds, failedCmds) - if err != nil { - moved, ask, addr := isMovedError(err) - if moved || ask { - return c.cmdsMoved(ctx, cmds, moved, ask, addr, failedCmds) - } - return err - } - - return pipelineReadCmds(rd, cmds) - }) - }) - }) -} - -func (c *ClusterClient) txPipelineReadQueued( - ctx context.Context, - rd *proto.Reader, - statusCmd *StatusCmd, - cmds []Cmder, - failedCmds *cmdsMap, -) error { - // Parse queued replies. - if err := statusCmd.readReply(rd); err != nil { - return err - } - - for _, cmd := range cmds { - err := statusCmd.readReply(rd) - if err == nil || c.checkMovedErr(ctx, cmd, err, failedCmds) || isRedisError(err) { - continue - } - return err - } - - // Parse number of replies. - line, err := rd.ReadLine() - if err != nil { - if err == Nil { - err = TxFailedErr - } - return err - } - - switch line[0] { - case proto.ErrorReply: - return proto.ParseErrorReply(line) - case proto.ArrayReply: - // ok - default: - return fmt.Errorf("redis: expected '*', but got line %q", line) - } - - return nil -} - -func (c *ClusterClient) cmdsMoved( - ctx context.Context, cmds []Cmder, - moved, ask bool, - addr string, - failedCmds *cmdsMap, -) error { - node, err := c.nodes.GetOrCreate(addr) - if err != nil { - return err - } - - if moved { - c.state.LazyReload() - for _, cmd := range cmds { - failedCmds.Add(node, cmd) - } - return nil - } - - if ask { - for _, cmd := range cmds { - failedCmds.Add(node, NewCmd(ctx, "asking"), cmd) - } - return nil - } - - return nil -} - -func (c *ClusterClient) Watch(ctx context.Context, fn func(*Tx) error, keys ...string) error { - if len(keys) == 0 { - return fmt.Errorf("redis: Watch requires at least one key") - } - - slot := hashtag.Slot(keys[0]) - for _, key := range keys[1:] { - if hashtag.Slot(key) != slot { - err := fmt.Errorf("redis: Watch requires all keys to be in the same slot") - return err - } - } - - node, err := c.slotMasterNode(ctx, slot) - if err != nil { - return err - } - - for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ { - if attempt > 0 { - if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil { - return err - } - } - - err = node.Client.Watch(ctx, fn, keys...) 
- if err == nil { - break - } - - moved, ask, addr := isMovedError(err) - if moved || ask { - node, err = c.nodes.GetOrCreate(addr) - if err != nil { - return err - } - continue - } - - if isReadOnly := isReadOnlyError(err); isReadOnly || err == pool.ErrClosed { - if isReadOnly { - c.state.LazyReload() - } - node, err = c.slotMasterNode(ctx, slot) - if err != nil { - return err - } - continue - } - - if shouldRetry(err, true) { - continue - } - - return err - } - - return err -} - -func (c *ClusterClient) pubSub() *PubSub { - var node *clusterNode - pubsub := &PubSub{ - opt: c.opt.clientOptions(), - - newConn: func(ctx context.Context, channels []string) (*pool.Conn, error) { - if node != nil { - panic("node != nil") - } - - var err error - if len(channels) > 0 { - slot := hashtag.Slot(channels[0]) - node, err = c.slotMasterNode(ctx, slot) - } else { - node, err = c.nodes.Random() - } - if err != nil { - return nil, err - } - - cn, err := node.Client.newConn(context.TODO()) - if err != nil { - node = nil - - return nil, err - } - - return cn, nil - }, - closeConn: func(cn *pool.Conn) error { - err := node.Client.connPool.CloseConn(cn) - node = nil - return err - }, - } - pubsub.init() - - return pubsub -} - -// Subscribe subscribes the client to the specified channels. -// Channels can be omitted to create empty subscription. -func (c *ClusterClient) Subscribe(ctx context.Context, channels ...string) *PubSub { - pubsub := c.pubSub() - if len(channels) > 0 { - _ = pubsub.Subscribe(ctx, channels...) - } - return pubsub -} - -// PSubscribe subscribes the client to the given patterns. -// Patterns can be omitted to create empty subscription. -func (c *ClusterClient) PSubscribe(ctx context.Context, channels ...string) *PubSub { - pubsub := c.pubSub() - if len(channels) > 0 { - _ = pubsub.PSubscribe(ctx, channels...) - } - return pubsub -} - -func (c *ClusterClient) retryBackoff(attempt int) time.Duration { - return internal.RetryBackoff(attempt, c.opt.MinRetryBackoff, c.opt.MaxRetryBackoff) -} - -func (c *ClusterClient) cmdsInfo(ctx context.Context) (map[string]*CommandInfo, error) { - // Try 3 random nodes. 
[... remaining deleted lines of vendor/github.com/go-redis/redis/v8/cluster.go elided: command-info lookup, slot/node routing helpers, SlaveForKey/MasterForKey, and the internal cmdsMap type ...]
diff --git a/vendor/github.com/go-redis/redis/v8/cluster_commands.go b/vendor/github.com/go-redis/redis/v8/cluster_commands.go
deleted file mode 100644
index 085bce8..0000000
--- a/vendor/github.com/go-redis/redis/v8/cluster_commands.go
+++ /dev/null
@@ -1,109 +0,0 @@
[... 109 deleted lines elided: cluster-wide DBSize, ScriptLoad, ScriptFlush, and ScriptExists helpers ...]
diff --git a/vendor/github.com/go-redis/redis/v8/command.go b/vendor/github.com/go-redis/redis/v8/command.go
deleted file mode 100644
index 4bb12a8..0000000
--- a/vendor/github.com/go-redis/redis/v8/command.go
+++ /dev/null
@@ -1,3478 +0,0 @@
[... deleted contents of command.go elided: the Cmder interface, reply-parsing helpers, and the command result types (Cmd, SliceCmd, StatusCmd, IntCmd, BoolCmd, StringCmd, FloatCmd, the X*/Z*/Geo* stream, sorted-set and geo commands, ScanCmd, ClusterSlotsCmd, CommandsInfoCmd, ...) ...]
rd.ReadIntReply() - if err != nil { - return nil, err - } - cmd.LastKeyPos = int8(lastKeyPos) - - stepCount, err := rd.ReadIntReply() - if err != nil { - return nil, err - } - cmd.StepCount = int8(stepCount) - - for _, flag := range cmd.Flags { - if flag == "readonly" { - cmd.ReadOnly = true - break - } - } - - if n == numArgRedis5 { - return &cmd, nil - } - - _, err = rd.ReadReply(func(rd *proto.Reader, n int64) (interface{}, error) { - cmd.ACLFlags = make([]string, n) - for i := 0; i < len(cmd.ACLFlags); i++ { - switch s, err := rd.ReadString(); { - case err == Nil: - cmd.ACLFlags[i] = "" - case err != nil: - return nil, err - default: - cmd.ACLFlags[i] = s - } - } - return nil, nil - }) - if err != nil { - return nil, err - } - - return &cmd, nil -} - -//------------------------------------------------------------------------------ - -type cmdsInfoCache struct { - fn func(ctx context.Context) (map[string]*CommandInfo, error) - - once internal.Once - cmds map[string]*CommandInfo -} - -func newCmdsInfoCache(fn func(ctx context.Context) (map[string]*CommandInfo, error)) *cmdsInfoCache { - return &cmdsInfoCache{ - fn: fn, - } -} - -func (c *cmdsInfoCache) Get(ctx context.Context) (map[string]*CommandInfo, error) { - err := c.once.Do(func() error { - cmds, err := c.fn(ctx) - if err != nil { - return err - } - - // Extensions have cmd names in upper case. Convert them to lower case. - for k, v := range cmds { - lower := internal.ToLower(k) - if lower != k { - cmds[lower] = v - } - } - - c.cmds = cmds - return nil - }) - return c.cmds, err -} - -//------------------------------------------------------------------------------ - -type SlowLog struct { - ID int64 - Time time.Time - Duration time.Duration - Args []string - // These are also optional fields emitted only by Redis 4.0 or greater: - // https://redis.io/commands/slowlog#output-format - ClientAddr string - ClientName string -} - -type SlowLogCmd struct { - baseCmd - - val []SlowLog -} - -var _ Cmder = (*SlowLogCmd)(nil) - -func NewSlowLogCmd(ctx context.Context, args ...interface{}) *SlowLogCmd { - return &SlowLogCmd{ - baseCmd: baseCmd{ - ctx: ctx, - args: args, - }, - } -} - -func (cmd *SlowLogCmd) SetVal(val []SlowLog) { - cmd.val = val -} - -func (cmd *SlowLogCmd) Val() []SlowLog { - return cmd.val -} - -func (cmd *SlowLogCmd) Result() ([]SlowLog, error) { - return cmd.Val(), cmd.Err() -} - -func (cmd *SlowLogCmd) String() string { - return cmdString(cmd, cmd.val) -} - -func (cmd *SlowLogCmd) readReply(rd *proto.Reader) error { - _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { - cmd.val = make([]SlowLog, n) - for i := 0; i < len(cmd.val); i++ { - n, err := rd.ReadArrayLen() - if err != nil { - return nil, err - } - if n < 4 { - err := fmt.Errorf("redis: got %d elements in slowlog get, expected at least 4", n) - return nil, err - } - - id, err := rd.ReadIntReply() - if err != nil { - return nil, err - } - - createdAt, err := rd.ReadIntReply() - if err != nil { - return nil, err - } - createdAtTime := time.Unix(createdAt, 0) - - costs, err := rd.ReadIntReply() - if err != nil { - return nil, err - } - costsDuration := time.Duration(costs) * time.Microsecond - - cmdLen, err := rd.ReadArrayLen() - if err != nil { - return nil, err - } - if cmdLen < 1 { - err := fmt.Errorf("redis: got %d elements commands reply in slowlog get, expected at least 1", cmdLen) - return nil, err - } - - cmdString := make([]string, cmdLen) - for i := 0; i < cmdLen; i++ { - cmdString[i], err = rd.ReadString() - if err != nil { 
- return nil, err - } - } - - var address, name string - for i := 4; i < n; i++ { - str, err := rd.ReadString() - if err != nil { - return nil, err - } - if i == 4 { - address = str - } else if i == 5 { - name = str - } - } - - cmd.val[i] = SlowLog{ - ID: id, - Time: createdAtTime, - Duration: costsDuration, - Args: cmdString, - ClientAddr: address, - ClientName: name, - } - } - return nil, nil - }) - return err -} diff --git a/vendor/github.com/go-redis/redis/v8/commands.go b/vendor/github.com/go-redis/redis/v8/commands.go deleted file mode 100644 index bbfe089..0000000 --- a/vendor/github.com/go-redis/redis/v8/commands.go +++ /dev/null @@ -1,3475 +0,0 @@ -package redis - -import ( - "context" - "errors" - "io" - "time" - - "github.com/go-redis/redis/v8/internal" -) - -// KeepTTL is a Redis KEEPTTL option to keep existing TTL, it requires your redis-server version >= 6.0, -// otherwise you will receive an error: (error) ERR syntax error. -// For example: -// -// rdb.Set(ctx, key, value, redis.KeepTTL) -const KeepTTL = -1 - -func usePrecise(dur time.Duration) bool { - return dur < time.Second || dur%time.Second != 0 -} - -func formatMs(ctx context.Context, dur time.Duration) int64 { - if dur > 0 && dur < time.Millisecond { - internal.Logger.Printf( - ctx, - "specified duration is %s, but minimal supported value is %s - truncating to 1ms", - dur, time.Millisecond, - ) - return 1 - } - return int64(dur / time.Millisecond) -} - -func formatSec(ctx context.Context, dur time.Duration) int64 { - if dur > 0 && dur < time.Second { - internal.Logger.Printf( - ctx, - "specified duration is %s, but minimal supported value is %s - truncating to 1s", - dur, time.Second, - ) - return 1 - } - return int64(dur / time.Second) -} - -func appendArgs(dst, src []interface{}) []interface{} { - if len(src) == 1 { - return appendArg(dst, src[0]) - } - - dst = append(dst, src...) - return dst -} - -func appendArg(dst []interface{}, arg interface{}) []interface{} { - switch arg := arg.(type) { - case []string: - for _, s := range arg { - dst = append(dst, s) - } - return dst - case []interface{}: - dst = append(dst, arg...) 
- return dst - case map[string]interface{}: - for k, v := range arg { - dst = append(dst, k, v) - } - return dst - case map[string]string: - for k, v := range arg { - dst = append(dst, k, v) - } - return dst - default: - return append(dst, arg) - } -} - -type Cmdable interface { - Pipeline() Pipeliner - Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) - - TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) - TxPipeline() Pipeliner - - Command(ctx context.Context) *CommandsInfoCmd - ClientGetName(ctx context.Context) *StringCmd - Echo(ctx context.Context, message interface{}) *StringCmd - Ping(ctx context.Context) *StatusCmd - Quit(ctx context.Context) *StatusCmd - Del(ctx context.Context, keys ...string) *IntCmd - Unlink(ctx context.Context, keys ...string) *IntCmd - Dump(ctx context.Context, key string) *StringCmd - Exists(ctx context.Context, keys ...string) *IntCmd - Expire(ctx context.Context, key string, expiration time.Duration) *BoolCmd - ExpireAt(ctx context.Context, key string, tm time.Time) *BoolCmd - ExpireNX(ctx context.Context, key string, expiration time.Duration) *BoolCmd - ExpireXX(ctx context.Context, key string, expiration time.Duration) *BoolCmd - ExpireGT(ctx context.Context, key string, expiration time.Duration) *BoolCmd - ExpireLT(ctx context.Context, key string, expiration time.Duration) *BoolCmd - Keys(ctx context.Context, pattern string) *StringSliceCmd - Migrate(ctx context.Context, host, port, key string, db int, timeout time.Duration) *StatusCmd - Move(ctx context.Context, key string, db int) *BoolCmd - ObjectRefCount(ctx context.Context, key string) *IntCmd - ObjectEncoding(ctx context.Context, key string) *StringCmd - ObjectIdleTime(ctx context.Context, key string) *DurationCmd - Persist(ctx context.Context, key string) *BoolCmd - PExpire(ctx context.Context, key string, expiration time.Duration) *BoolCmd - PExpireAt(ctx context.Context, key string, tm time.Time) *BoolCmd - PTTL(ctx context.Context, key string) *DurationCmd - RandomKey(ctx context.Context) *StringCmd - Rename(ctx context.Context, key, newkey string) *StatusCmd - RenameNX(ctx context.Context, key, newkey string) *BoolCmd - Restore(ctx context.Context, key string, ttl time.Duration, value string) *StatusCmd - RestoreReplace(ctx context.Context, key string, ttl time.Duration, value string) *StatusCmd - Sort(ctx context.Context, key string, sort *Sort) *StringSliceCmd - SortStore(ctx context.Context, key, store string, sort *Sort) *IntCmd - SortInterfaces(ctx context.Context, key string, sort *Sort) *SliceCmd - Touch(ctx context.Context, keys ...string) *IntCmd - TTL(ctx context.Context, key string) *DurationCmd - Type(ctx context.Context, key string) *StatusCmd - Append(ctx context.Context, key, value string) *IntCmd - Decr(ctx context.Context, key string) *IntCmd - DecrBy(ctx context.Context, key string, decrement int64) *IntCmd - Get(ctx context.Context, key string) *StringCmd - GetRange(ctx context.Context, key string, start, end int64) *StringCmd - GetSet(ctx context.Context, key string, value interface{}) *StringCmd - GetEx(ctx context.Context, key string, expiration time.Duration) *StringCmd - GetDel(ctx context.Context, key string) *StringCmd - Incr(ctx context.Context, key string) *IntCmd - IncrBy(ctx context.Context, key string, value int64) *IntCmd - IncrByFloat(ctx context.Context, key string, value float64) *FloatCmd - MGet(ctx context.Context, keys ...string) *SliceCmd - MSet(ctx context.Context, values ...interface{}) *StatusCmd - 
MSetNX(ctx context.Context, values ...interface{}) *BoolCmd - Set(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd - SetArgs(ctx context.Context, key string, value interface{}, a SetArgs) *StatusCmd - // TODO: rename to SetEx - SetEX(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd - SetNX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd - SetXX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd - SetRange(ctx context.Context, key string, offset int64, value string) *IntCmd - StrLen(ctx context.Context, key string) *IntCmd - Copy(ctx context.Context, sourceKey string, destKey string, db int, replace bool) *IntCmd - - GetBit(ctx context.Context, key string, offset int64) *IntCmd - SetBit(ctx context.Context, key string, offset int64, value int) *IntCmd - BitCount(ctx context.Context, key string, bitCount *BitCount) *IntCmd - BitOpAnd(ctx context.Context, destKey string, keys ...string) *IntCmd - BitOpOr(ctx context.Context, destKey string, keys ...string) *IntCmd - BitOpXor(ctx context.Context, destKey string, keys ...string) *IntCmd - BitOpNot(ctx context.Context, destKey string, key string) *IntCmd - BitPos(ctx context.Context, key string, bit int64, pos ...int64) *IntCmd - BitField(ctx context.Context, key string, args ...interface{}) *IntSliceCmd - - Scan(ctx context.Context, cursor uint64, match string, count int64) *ScanCmd - ScanType(ctx context.Context, cursor uint64, match string, count int64, keyType string) *ScanCmd - SScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd - HScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd - ZScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd - - HDel(ctx context.Context, key string, fields ...string) *IntCmd - HExists(ctx context.Context, key, field string) *BoolCmd - HGet(ctx context.Context, key, field string) *StringCmd - HGetAll(ctx context.Context, key string) *StringStringMapCmd - HIncrBy(ctx context.Context, key, field string, incr int64) *IntCmd - HIncrByFloat(ctx context.Context, key, field string, incr float64) *FloatCmd - HKeys(ctx context.Context, key string) *StringSliceCmd - HLen(ctx context.Context, key string) *IntCmd - HMGet(ctx context.Context, key string, fields ...string) *SliceCmd - HSet(ctx context.Context, key string, values ...interface{}) *IntCmd - HMSet(ctx context.Context, key string, values ...interface{}) *BoolCmd - HSetNX(ctx context.Context, key, field string, value interface{}) *BoolCmd - HVals(ctx context.Context, key string) *StringSliceCmd - HRandField(ctx context.Context, key string, count int, withValues bool) *StringSliceCmd - - BLPop(ctx context.Context, timeout time.Duration, keys ...string) *StringSliceCmd - BRPop(ctx context.Context, timeout time.Duration, keys ...string) *StringSliceCmd - BRPopLPush(ctx context.Context, source, destination string, timeout time.Duration) *StringCmd - LIndex(ctx context.Context, key string, index int64) *StringCmd - LInsert(ctx context.Context, key, op string, pivot, value interface{}) *IntCmd - LInsertBefore(ctx context.Context, key string, pivot, value interface{}) *IntCmd - LInsertAfter(ctx context.Context, key string, pivot, value interface{}) *IntCmd - LLen(ctx context.Context, key string) *IntCmd - LPop(ctx context.Context, key string) *StringCmd - LPopCount(ctx context.Context, key string, count int) 
*StringSliceCmd - LPos(ctx context.Context, key string, value string, args LPosArgs) *IntCmd - LPosCount(ctx context.Context, key string, value string, count int64, args LPosArgs) *IntSliceCmd - LPush(ctx context.Context, key string, values ...interface{}) *IntCmd - LPushX(ctx context.Context, key string, values ...interface{}) *IntCmd - LRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd - LRem(ctx context.Context, key string, count int64, value interface{}) *IntCmd - LSet(ctx context.Context, key string, index int64, value interface{}) *StatusCmd - LTrim(ctx context.Context, key string, start, stop int64) *StatusCmd - RPop(ctx context.Context, key string) *StringCmd - RPopCount(ctx context.Context, key string, count int) *StringSliceCmd - RPopLPush(ctx context.Context, source, destination string) *StringCmd - RPush(ctx context.Context, key string, values ...interface{}) *IntCmd - RPushX(ctx context.Context, key string, values ...interface{}) *IntCmd - LMove(ctx context.Context, source, destination, srcpos, destpos string) *StringCmd - BLMove(ctx context.Context, source, destination, srcpos, destpos string, timeout time.Duration) *StringCmd - - SAdd(ctx context.Context, key string, members ...interface{}) *IntCmd - SCard(ctx context.Context, key string) *IntCmd - SDiff(ctx context.Context, keys ...string) *StringSliceCmd - SDiffStore(ctx context.Context, destination string, keys ...string) *IntCmd - SInter(ctx context.Context, keys ...string) *StringSliceCmd - SInterStore(ctx context.Context, destination string, keys ...string) *IntCmd - SIsMember(ctx context.Context, key string, member interface{}) *BoolCmd - SMIsMember(ctx context.Context, key string, members ...interface{}) *BoolSliceCmd - SMembers(ctx context.Context, key string) *StringSliceCmd - SMembersMap(ctx context.Context, key string) *StringStructMapCmd - SMove(ctx context.Context, source, destination string, member interface{}) *BoolCmd - SPop(ctx context.Context, key string) *StringCmd - SPopN(ctx context.Context, key string, count int64) *StringSliceCmd - SRandMember(ctx context.Context, key string) *StringCmd - SRandMemberN(ctx context.Context, key string, count int64) *StringSliceCmd - SRem(ctx context.Context, key string, members ...interface{}) *IntCmd - SUnion(ctx context.Context, keys ...string) *StringSliceCmd - SUnionStore(ctx context.Context, destination string, keys ...string) *IntCmd - - XAdd(ctx context.Context, a *XAddArgs) *StringCmd - XDel(ctx context.Context, stream string, ids ...string) *IntCmd - XLen(ctx context.Context, stream string) *IntCmd - XRange(ctx context.Context, stream, start, stop string) *XMessageSliceCmd - XRangeN(ctx context.Context, stream, start, stop string, count int64) *XMessageSliceCmd - XRevRange(ctx context.Context, stream string, start, stop string) *XMessageSliceCmd - XRevRangeN(ctx context.Context, stream string, start, stop string, count int64) *XMessageSliceCmd - XRead(ctx context.Context, a *XReadArgs) *XStreamSliceCmd - XReadStreams(ctx context.Context, streams ...string) *XStreamSliceCmd - XGroupCreate(ctx context.Context, stream, group, start string) *StatusCmd - XGroupCreateMkStream(ctx context.Context, stream, group, start string) *StatusCmd - XGroupSetID(ctx context.Context, stream, group, start string) *StatusCmd - XGroupDestroy(ctx context.Context, stream, group string) *IntCmd - XGroupCreateConsumer(ctx context.Context, stream, group, consumer string) *IntCmd - XGroupDelConsumer(ctx context.Context, stream, group, consumer string) *IntCmd - 
XReadGroup(ctx context.Context, a *XReadGroupArgs) *XStreamSliceCmd - XAck(ctx context.Context, stream, group string, ids ...string) *IntCmd - XPending(ctx context.Context, stream, group string) *XPendingCmd - XPendingExt(ctx context.Context, a *XPendingExtArgs) *XPendingExtCmd - XClaim(ctx context.Context, a *XClaimArgs) *XMessageSliceCmd - XClaimJustID(ctx context.Context, a *XClaimArgs) *StringSliceCmd - XAutoClaim(ctx context.Context, a *XAutoClaimArgs) *XAutoClaimCmd - XAutoClaimJustID(ctx context.Context, a *XAutoClaimArgs) *XAutoClaimJustIDCmd - - // TODO: XTrim and XTrimApprox remove in v9. - XTrim(ctx context.Context, key string, maxLen int64) *IntCmd - XTrimApprox(ctx context.Context, key string, maxLen int64) *IntCmd - XTrimMaxLen(ctx context.Context, key string, maxLen int64) *IntCmd - XTrimMaxLenApprox(ctx context.Context, key string, maxLen, limit int64) *IntCmd - XTrimMinID(ctx context.Context, key string, minID string) *IntCmd - XTrimMinIDApprox(ctx context.Context, key string, minID string, limit int64) *IntCmd - XInfoGroups(ctx context.Context, key string) *XInfoGroupsCmd - XInfoStream(ctx context.Context, key string) *XInfoStreamCmd - XInfoStreamFull(ctx context.Context, key string, count int) *XInfoStreamFullCmd - XInfoConsumers(ctx context.Context, key string, group string) *XInfoConsumersCmd - - BZPopMax(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd - BZPopMin(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd - - // TODO: remove - // ZAddCh - // ZIncr - // ZAddNXCh - // ZAddXXCh - // ZIncrNX - // ZIncrXX - // in v9. - // use ZAddArgs and ZAddArgsIncr. - - ZAdd(ctx context.Context, key string, members ...*Z) *IntCmd - ZAddNX(ctx context.Context, key string, members ...*Z) *IntCmd - ZAddXX(ctx context.Context, key string, members ...*Z) *IntCmd - ZAddCh(ctx context.Context, key string, members ...*Z) *IntCmd - ZAddNXCh(ctx context.Context, key string, members ...*Z) *IntCmd - ZAddXXCh(ctx context.Context, key string, members ...*Z) *IntCmd - ZAddArgs(ctx context.Context, key string, args ZAddArgs) *IntCmd - ZAddArgsIncr(ctx context.Context, key string, args ZAddArgs) *FloatCmd - ZIncr(ctx context.Context, key string, member *Z) *FloatCmd - ZIncrNX(ctx context.Context, key string, member *Z) *FloatCmd - ZIncrXX(ctx context.Context, key string, member *Z) *FloatCmd - ZCard(ctx context.Context, key string) *IntCmd - ZCount(ctx context.Context, key, min, max string) *IntCmd - ZLexCount(ctx context.Context, key, min, max string) *IntCmd - ZIncrBy(ctx context.Context, key string, increment float64, member string) *FloatCmd - ZInter(ctx context.Context, store *ZStore) *StringSliceCmd - ZInterWithScores(ctx context.Context, store *ZStore) *ZSliceCmd - ZInterStore(ctx context.Context, destination string, store *ZStore) *IntCmd - ZMScore(ctx context.Context, key string, members ...string) *FloatSliceCmd - ZPopMax(ctx context.Context, key string, count ...int64) *ZSliceCmd - ZPopMin(ctx context.Context, key string, count ...int64) *ZSliceCmd - ZRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd - ZRangeWithScores(ctx context.Context, key string, start, stop int64) *ZSliceCmd - ZRangeByScore(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd - ZRangeByLex(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd - ZRangeByScoreWithScores(ctx context.Context, key string, opt *ZRangeBy) *ZSliceCmd - ZRangeArgs(ctx context.Context, z ZRangeArgs) *StringSliceCmd - ZRangeArgsWithScores(ctx 
context.Context, z ZRangeArgs) *ZSliceCmd - ZRangeStore(ctx context.Context, dst string, z ZRangeArgs) *IntCmd - ZRank(ctx context.Context, key, member string) *IntCmd - ZRem(ctx context.Context, key string, members ...interface{}) *IntCmd - ZRemRangeByRank(ctx context.Context, key string, start, stop int64) *IntCmd - ZRemRangeByScore(ctx context.Context, key, min, max string) *IntCmd - ZRemRangeByLex(ctx context.Context, key, min, max string) *IntCmd - ZRevRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd - ZRevRangeWithScores(ctx context.Context, key string, start, stop int64) *ZSliceCmd - ZRevRangeByScore(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd - ZRevRangeByLex(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd - ZRevRangeByScoreWithScores(ctx context.Context, key string, opt *ZRangeBy) *ZSliceCmd - ZRevRank(ctx context.Context, key, member string) *IntCmd - ZScore(ctx context.Context, key, member string) *FloatCmd - ZUnionStore(ctx context.Context, dest string, store *ZStore) *IntCmd - ZUnion(ctx context.Context, store ZStore) *StringSliceCmd - ZUnionWithScores(ctx context.Context, store ZStore) *ZSliceCmd - ZRandMember(ctx context.Context, key string, count int, withScores bool) *StringSliceCmd - ZDiff(ctx context.Context, keys ...string) *StringSliceCmd - ZDiffWithScores(ctx context.Context, keys ...string) *ZSliceCmd - ZDiffStore(ctx context.Context, destination string, keys ...string) *IntCmd - - PFAdd(ctx context.Context, key string, els ...interface{}) *IntCmd - PFCount(ctx context.Context, keys ...string) *IntCmd - PFMerge(ctx context.Context, dest string, keys ...string) *StatusCmd - - BgRewriteAOF(ctx context.Context) *StatusCmd - BgSave(ctx context.Context) *StatusCmd - ClientKill(ctx context.Context, ipPort string) *StatusCmd - ClientKillByFilter(ctx context.Context, keys ...string) *IntCmd - ClientList(ctx context.Context) *StringCmd - ClientPause(ctx context.Context, dur time.Duration) *BoolCmd - ClientID(ctx context.Context) *IntCmd - ConfigGet(ctx context.Context, parameter string) *SliceCmd - ConfigResetStat(ctx context.Context) *StatusCmd - ConfigSet(ctx context.Context, parameter, value string) *StatusCmd - ConfigRewrite(ctx context.Context) *StatusCmd - DBSize(ctx context.Context) *IntCmd - FlushAll(ctx context.Context) *StatusCmd - FlushAllAsync(ctx context.Context) *StatusCmd - FlushDB(ctx context.Context) *StatusCmd - FlushDBAsync(ctx context.Context) *StatusCmd - Info(ctx context.Context, section ...string) *StringCmd - LastSave(ctx context.Context) *IntCmd - Save(ctx context.Context) *StatusCmd - Shutdown(ctx context.Context) *StatusCmd - ShutdownSave(ctx context.Context) *StatusCmd - ShutdownNoSave(ctx context.Context) *StatusCmd - SlaveOf(ctx context.Context, host, port string) *StatusCmd - Time(ctx context.Context) *TimeCmd - DebugObject(ctx context.Context, key string) *StringCmd - ReadOnly(ctx context.Context) *StatusCmd - ReadWrite(ctx context.Context) *StatusCmd - MemoryUsage(ctx context.Context, key string, samples ...int) *IntCmd - - Eval(ctx context.Context, script string, keys []string, args ...interface{}) *Cmd - EvalSha(ctx context.Context, sha1 string, keys []string, args ...interface{}) *Cmd - ScriptExists(ctx context.Context, hashes ...string) *BoolSliceCmd - ScriptFlush(ctx context.Context) *StatusCmd - ScriptKill(ctx context.Context) *StatusCmd - ScriptLoad(ctx context.Context, script string) *StringCmd - - Publish(ctx context.Context, channel string, message interface{}) *IntCmd - 
PubSubChannels(ctx context.Context, pattern string) *StringSliceCmd - PubSubNumSub(ctx context.Context, channels ...string) *StringIntMapCmd - PubSubNumPat(ctx context.Context) *IntCmd - - ClusterSlots(ctx context.Context) *ClusterSlotsCmd - ClusterNodes(ctx context.Context) *StringCmd - ClusterMeet(ctx context.Context, host, port string) *StatusCmd - ClusterForget(ctx context.Context, nodeID string) *StatusCmd - ClusterReplicate(ctx context.Context, nodeID string) *StatusCmd - ClusterResetSoft(ctx context.Context) *StatusCmd - ClusterResetHard(ctx context.Context) *StatusCmd - ClusterInfo(ctx context.Context) *StringCmd - ClusterKeySlot(ctx context.Context, key string) *IntCmd - ClusterGetKeysInSlot(ctx context.Context, slot int, count int) *StringSliceCmd - ClusterCountFailureReports(ctx context.Context, nodeID string) *IntCmd - ClusterCountKeysInSlot(ctx context.Context, slot int) *IntCmd - ClusterDelSlots(ctx context.Context, slots ...int) *StatusCmd - ClusterDelSlotsRange(ctx context.Context, min, max int) *StatusCmd - ClusterSaveConfig(ctx context.Context) *StatusCmd - ClusterSlaves(ctx context.Context, nodeID string) *StringSliceCmd - ClusterFailover(ctx context.Context) *StatusCmd - ClusterAddSlots(ctx context.Context, slots ...int) *StatusCmd - ClusterAddSlotsRange(ctx context.Context, min, max int) *StatusCmd - - GeoAdd(ctx context.Context, key string, geoLocation ...*GeoLocation) *IntCmd - GeoPos(ctx context.Context, key string, members ...string) *GeoPosCmd - GeoRadius(ctx context.Context, key string, longitude, latitude float64, query *GeoRadiusQuery) *GeoLocationCmd - GeoRadiusStore(ctx context.Context, key string, longitude, latitude float64, query *GeoRadiusQuery) *IntCmd - GeoRadiusByMember(ctx context.Context, key, member string, query *GeoRadiusQuery) *GeoLocationCmd - GeoRadiusByMemberStore(ctx context.Context, key, member string, query *GeoRadiusQuery) *IntCmd - GeoSearch(ctx context.Context, key string, q *GeoSearchQuery) *StringSliceCmd - GeoSearchLocation(ctx context.Context, key string, q *GeoSearchLocationQuery) *GeoSearchLocationCmd - GeoSearchStore(ctx context.Context, key, store string, q *GeoSearchStoreQuery) *IntCmd - GeoDist(ctx context.Context, key string, member1, member2, unit string) *FloatCmd - GeoHash(ctx context.Context, key string, members ...string) *StringSliceCmd -} - -type StatefulCmdable interface { - Cmdable - Auth(ctx context.Context, password string) *StatusCmd - AuthACL(ctx context.Context, username, password string) *StatusCmd - Select(ctx context.Context, index int) *StatusCmd - SwapDB(ctx context.Context, index1, index2 int) *StatusCmd - ClientSetName(ctx context.Context, name string) *BoolCmd -} - -var ( - _ Cmdable = (*Client)(nil) - _ Cmdable = (*Tx)(nil) - _ Cmdable = (*Ring)(nil) - _ Cmdable = (*ClusterClient)(nil) -) - -type cmdable func(ctx context.Context, cmd Cmder) error - -type statefulCmdable func(ctx context.Context, cmd Cmder) error - -//------------------------------------------------------------------------------ - -func (c statefulCmdable) Auth(ctx context.Context, password string) *StatusCmd { - cmd := NewStatusCmd(ctx, "auth", password) - _ = c(ctx, cmd) - return cmd -} - -// AuthACL Perform an AUTH command, using the given user and pass. -// Should be used to authenticate the current connection with one of the connections defined in the ACL list -// when connecting to a Redis 6.0 instance, or greater, that is using the Redis ACL system. 
-func (c statefulCmdable) AuthACL(ctx context.Context, username, password string) *StatusCmd { - cmd := NewStatusCmd(ctx, "auth", username, password) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) Wait(ctx context.Context, numSlaves int, timeout time.Duration) *IntCmd { - cmd := NewIntCmd(ctx, "wait", numSlaves, int(timeout/time.Millisecond)) - cmd.setReadTimeout(timeout) - _ = c(ctx, cmd) - return cmd -} - -func (c statefulCmdable) Select(ctx context.Context, index int) *StatusCmd { - cmd := NewStatusCmd(ctx, "select", index) - _ = c(ctx, cmd) - return cmd -} - -func (c statefulCmdable) SwapDB(ctx context.Context, index1, index2 int) *StatusCmd { - cmd := NewStatusCmd(ctx, "swapdb", index1, index2) - _ = c(ctx, cmd) - return cmd -} - -// ClientSetName assigns a name to the connection. -func (c statefulCmdable) ClientSetName(ctx context.Context, name string) *BoolCmd { - cmd := NewBoolCmd(ctx, "client", "setname", name) - _ = c(ctx, cmd) - return cmd -} - -//------------------------------------------------------------------------------ - -func (c cmdable) Command(ctx context.Context) *CommandsInfoCmd { - cmd := NewCommandsInfoCmd(ctx, "command") - _ = c(ctx, cmd) - return cmd -} - -// ClientGetName returns the name of the connection. -func (c cmdable) ClientGetName(ctx context.Context) *StringCmd { - cmd := NewStringCmd(ctx, "client", "getname") - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) Echo(ctx context.Context, message interface{}) *StringCmd { - cmd := NewStringCmd(ctx, "echo", message) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) Ping(ctx context.Context) *StatusCmd { - cmd := NewStatusCmd(ctx, "ping") - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) Quit(_ context.Context) *StatusCmd { - panic("not implemented") -} - -func (c cmdable) Del(ctx context.Context, keys ...string) *IntCmd { - args := make([]interface{}, 1+len(keys)) - args[0] = "del" - for i, key := range keys { - args[1+i] = key - } - cmd := NewIntCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) Unlink(ctx context.Context, keys ...string) *IntCmd { - args := make([]interface{}, 1+len(keys)) - args[0] = "unlink" - for i, key := range keys { - args[1+i] = key - } - cmd := NewIntCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) Dump(ctx context.Context, key string) *StringCmd { - cmd := NewStringCmd(ctx, "dump", key) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) Exists(ctx context.Context, keys ...string) *IntCmd { - args := make([]interface{}, 1+len(keys)) - args[0] = "exists" - for i, key := range keys { - args[1+i] = key - } - cmd := NewIntCmd(ctx, args...) 
- _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) Expire(ctx context.Context, key string, expiration time.Duration) *BoolCmd { - return c.expire(ctx, key, expiration, "") -} - -func (c cmdable) ExpireNX(ctx context.Context, key string, expiration time.Duration) *BoolCmd { - return c.expire(ctx, key, expiration, "NX") -} - -func (c cmdable) ExpireXX(ctx context.Context, key string, expiration time.Duration) *BoolCmd { - return c.expire(ctx, key, expiration, "XX") -} - -func (c cmdable) ExpireGT(ctx context.Context, key string, expiration time.Duration) *BoolCmd { - return c.expire(ctx, key, expiration, "GT") -} - -func (c cmdable) ExpireLT(ctx context.Context, key string, expiration time.Duration) *BoolCmd { - return c.expire(ctx, key, expiration, "LT") -} - -func (c cmdable) expire( - ctx context.Context, key string, expiration time.Duration, mode string, -) *BoolCmd { - args := make([]interface{}, 3, 4) - args[0] = "expire" - args[1] = key - args[2] = formatSec(ctx, expiration) - if mode != "" { - args = append(args, mode) - } - - cmd := NewBoolCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) ExpireAt(ctx context.Context, key string, tm time.Time) *BoolCmd { - cmd := NewBoolCmd(ctx, "expireat", key, tm.Unix()) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) Keys(ctx context.Context, pattern string) *StringSliceCmd { - cmd := NewStringSliceCmd(ctx, "keys", pattern) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) Migrate(ctx context.Context, host, port, key string, db int, timeout time.Duration) *StatusCmd { - cmd := NewStatusCmd( - ctx, - "migrate", - host, - port, - key, - db, - formatMs(ctx, timeout), - ) - cmd.setReadTimeout(timeout) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) Move(ctx context.Context, key string, db int) *BoolCmd { - cmd := NewBoolCmd(ctx, "move", key, db) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) ObjectRefCount(ctx context.Context, key string) *IntCmd { - cmd := NewIntCmd(ctx, "object", "refcount", key) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) ObjectEncoding(ctx context.Context, key string) *StringCmd { - cmd := NewStringCmd(ctx, "object", "encoding", key) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) ObjectIdleTime(ctx context.Context, key string) *DurationCmd { - cmd := NewDurationCmd(ctx, time.Second, "object", "idletime", key) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) Persist(ctx context.Context, key string) *BoolCmd { - cmd := NewBoolCmd(ctx, "persist", key) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) PExpire(ctx context.Context, key string, expiration time.Duration) *BoolCmd { - cmd := NewBoolCmd(ctx, "pexpire", key, formatMs(ctx, expiration)) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) PExpireAt(ctx context.Context, key string, tm time.Time) *BoolCmd { - cmd := NewBoolCmd( - ctx, - "pexpireat", - key, - tm.UnixNano()/int64(time.Millisecond), - ) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) PTTL(ctx context.Context, key string) *DurationCmd { - cmd := NewDurationCmd(ctx, time.Millisecond, "pttl", key) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) RandomKey(ctx context.Context) *StringCmd { - cmd := NewStringCmd(ctx, "randomkey") - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) Rename(ctx context.Context, key, newkey string) *StatusCmd { - cmd := NewStatusCmd(ctx, "rename", key, newkey) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) RenameNX(ctx context.Context, key, newkey string) *BoolCmd { - cmd := NewBoolCmd(ctx, 
"renamenx", key, newkey) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) Restore(ctx context.Context, key string, ttl time.Duration, value string) *StatusCmd { - cmd := NewStatusCmd( - ctx, - "restore", - key, - formatMs(ctx, ttl), - value, - ) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) RestoreReplace(ctx context.Context, key string, ttl time.Duration, value string) *StatusCmd { - cmd := NewStatusCmd( - ctx, - "restore", - key, - formatMs(ctx, ttl), - value, - "replace", - ) - _ = c(ctx, cmd) - return cmd -} - -type Sort struct { - By string - Offset, Count int64 - Get []string - Order string - Alpha bool -} - -func (sort *Sort) args(key string) []interface{} { - args := []interface{}{"sort", key} - if sort.By != "" { - args = append(args, "by", sort.By) - } - if sort.Offset != 0 || sort.Count != 0 { - args = append(args, "limit", sort.Offset, sort.Count) - } - for _, get := range sort.Get { - args = append(args, "get", get) - } - if sort.Order != "" { - args = append(args, sort.Order) - } - if sort.Alpha { - args = append(args, "alpha") - } - return args -} - -func (c cmdable) Sort(ctx context.Context, key string, sort *Sort) *StringSliceCmd { - cmd := NewStringSliceCmd(ctx, sort.args(key)...) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) SortStore(ctx context.Context, key, store string, sort *Sort) *IntCmd { - args := sort.args(key) - if store != "" { - args = append(args, "store", store) - } - cmd := NewIntCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) SortInterfaces(ctx context.Context, key string, sort *Sort) *SliceCmd { - cmd := NewSliceCmd(ctx, sort.args(key)...) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) Touch(ctx context.Context, keys ...string) *IntCmd { - args := make([]interface{}, len(keys)+1) - args[0] = "touch" - for i, key := range keys { - args[i+1] = key - } - cmd := NewIntCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) TTL(ctx context.Context, key string) *DurationCmd { - cmd := NewDurationCmd(ctx, time.Second, "ttl", key) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) Type(ctx context.Context, key string) *StatusCmd { - cmd := NewStatusCmd(ctx, "type", key) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) Append(ctx context.Context, key, value string) *IntCmd { - cmd := NewIntCmd(ctx, "append", key, value) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) Decr(ctx context.Context, key string) *IntCmd { - cmd := NewIntCmd(ctx, "decr", key) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) DecrBy(ctx context.Context, key string, decrement int64) *IntCmd { - cmd := NewIntCmd(ctx, "decrby", key, decrement) - _ = c(ctx, cmd) - return cmd -} - -// Get Redis `GET key` command. It returns redis.Nil error when key does not exist. -func (c cmdable) Get(ctx context.Context, key string) *StringCmd { - cmd := NewStringCmd(ctx, "get", key) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) GetRange(ctx context.Context, key string, start, end int64) *StringCmd { - cmd := NewStringCmd(ctx, "getrange", key, start, end) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) GetSet(ctx context.Context, key string, value interface{}) *StringCmd { - cmd := NewStringCmd(ctx, "getset", key, value) - _ = c(ctx, cmd) - return cmd -} - -// GetEx An expiration of zero removes the TTL associated with the key (i.e. GETEX key persist). -// Requires Redis >= 6.2.0. 
-func (c cmdable) GetEx(ctx context.Context, key string, expiration time.Duration) *StringCmd { - args := make([]interface{}, 0, 4) - args = append(args, "getex", key) - if expiration > 0 { - if usePrecise(expiration) { - args = append(args, "px", formatMs(ctx, expiration)) - } else { - args = append(args, "ex", formatSec(ctx, expiration)) - } - } else if expiration == 0 { - args = append(args, "persist") - } - - cmd := NewStringCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -// GetDel redis-server version >= 6.2.0. -func (c cmdable) GetDel(ctx context.Context, key string) *StringCmd { - cmd := NewStringCmd(ctx, "getdel", key) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) Incr(ctx context.Context, key string) *IntCmd { - cmd := NewIntCmd(ctx, "incr", key) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) IncrBy(ctx context.Context, key string, value int64) *IntCmd { - cmd := NewIntCmd(ctx, "incrby", key, value) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) IncrByFloat(ctx context.Context, key string, value float64) *FloatCmd { - cmd := NewFloatCmd(ctx, "incrbyfloat", key, value) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) MGet(ctx context.Context, keys ...string) *SliceCmd { - args := make([]interface{}, 1+len(keys)) - args[0] = "mget" - for i, key := range keys { - args[1+i] = key - } - cmd := NewSliceCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -// MSet is like Set but accepts multiple values: -// - MSet("key1", "value1", "key2", "value2") -// - MSet([]string{"key1", "value1", "key2", "value2"}) -// - MSet(map[string]interface{}{"key1": "value1", "key2": "value2"}) -func (c cmdable) MSet(ctx context.Context, values ...interface{}) *StatusCmd { - args := make([]interface{}, 1, 1+len(values)) - args[0] = "mset" - args = appendArgs(args, values) - cmd := NewStatusCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -// MSetNX is like SetNX but accepts multiple values: -// - MSetNX("key1", "value1", "key2", "value2") -// - MSetNX([]string{"key1", "value1", "key2", "value2"}) -// - MSetNX(map[string]interface{}{"key1": "value1", "key2": "value2"}) -func (c cmdable) MSetNX(ctx context.Context, values ...interface{}) *BoolCmd { - args := make([]interface{}, 1, 1+len(values)) - args[0] = "msetnx" - args = appendArgs(args, values) - cmd := NewBoolCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -// Set Redis `SET key value [expiration]` command. -// Use expiration for `SETEX`-like behavior. -// -// Zero expiration means the key has no expiration time. -// KeepTTL is a Redis KEEPTTL option to keep existing TTL, it requires your redis-server version >= 6.0, -// otherwise you will receive an error: (error) ERR syntax error. -func (c cmdable) Set(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd { - args := make([]interface{}, 3, 5) - args[0] = "set" - args[1] = key - args[2] = value - if expiration > 0 { - if usePrecise(expiration) { - args = append(args, "px", formatMs(ctx, expiration)) - } else { - args = append(args, "ex", formatSec(ctx, expiration)) - } - } else if expiration == KeepTTL { - args = append(args, "keepttl") - } - - cmd := NewStatusCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -// SetArgs provides arguments for the SetArgs function. -type SetArgs struct { - // Mode can be `NX` or `XX` or empty. - Mode string - - // Zero `TTL` or `Expiration` means that the key has no expiration time. 
- TTL time.Duration - ExpireAt time.Time - - // When Get is true, the command returns the old value stored at key, or nil when key did not exist. - Get bool - - // KeepTTL is a Redis KEEPTTL option to keep existing TTL, it requires your redis-server version >= 6.0, - // otherwise you will receive an error: (error) ERR syntax error. - KeepTTL bool -} - -// SetArgs supports all the options that the SET command supports. -// It is the alternative to the Set function when you want -// to have more control over the options. -func (c cmdable) SetArgs(ctx context.Context, key string, value interface{}, a SetArgs) *StatusCmd { - args := []interface{}{"set", key, value} - - if a.KeepTTL { - args = append(args, "keepttl") - } - - if !a.ExpireAt.IsZero() { - args = append(args, "exat", a.ExpireAt.Unix()) - } - if a.TTL > 0 { - if usePrecise(a.TTL) { - args = append(args, "px", formatMs(ctx, a.TTL)) - } else { - args = append(args, "ex", formatSec(ctx, a.TTL)) - } - } - - if a.Mode != "" { - args = append(args, a.Mode) - } - - if a.Get { - args = append(args, "get") - } - - cmd := NewStatusCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -// SetEX Redis `SETEX key expiration value` command. -func (c cmdable) SetEX(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd { - cmd := NewStatusCmd(ctx, "setex", key, formatSec(ctx, expiration), value) - _ = c(ctx, cmd) - return cmd -} - -// SetNX Redis `SET key value [expiration] NX` command. -// -// Zero expiration means the key has no expiration time. -// KeepTTL is a Redis KEEPTTL option to keep existing TTL, it requires your redis-server version >= 6.0, -// otherwise you will receive an error: (error) ERR syntax error. -func (c cmdable) SetNX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd { - var cmd *BoolCmd - switch expiration { - case 0: - // Use old `SETNX` to support old Redis versions. - cmd = NewBoolCmd(ctx, "setnx", key, value) - case KeepTTL: - cmd = NewBoolCmd(ctx, "set", key, value, "keepttl", "nx") - default: - if usePrecise(expiration) { - cmd = NewBoolCmd(ctx, "set", key, value, "px", formatMs(ctx, expiration), "nx") - } else { - cmd = NewBoolCmd(ctx, "set", key, value, "ex", formatSec(ctx, expiration), "nx") - } - } - - _ = c(ctx, cmd) - return cmd -} - -// SetXX Redis `SET key value [expiration] XX` command. -// -// Zero expiration means the key has no expiration time. -// KeepTTL is a Redis KEEPTTL option to keep existing TTL, it requires your redis-server version >= 6.0, -// otherwise you will receive an error: (error) ERR syntax error. 
-func (c cmdable) SetXX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd { - var cmd *BoolCmd - switch expiration { - case 0: - cmd = NewBoolCmd(ctx, "set", key, value, "xx") - case KeepTTL: - cmd = NewBoolCmd(ctx, "set", key, value, "keepttl", "xx") - default: - if usePrecise(expiration) { - cmd = NewBoolCmd(ctx, "set", key, value, "px", formatMs(ctx, expiration), "xx") - } else { - cmd = NewBoolCmd(ctx, "set", key, value, "ex", formatSec(ctx, expiration), "xx") - } - } - - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) SetRange(ctx context.Context, key string, offset int64, value string) *IntCmd { - cmd := NewIntCmd(ctx, "setrange", key, offset, value) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) StrLen(ctx context.Context, key string) *IntCmd { - cmd := NewIntCmd(ctx, "strlen", key) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) Copy(ctx context.Context, sourceKey, destKey string, db int, replace bool) *IntCmd { - args := []interface{}{"copy", sourceKey, destKey, "DB", db} - if replace { - args = append(args, "REPLACE") - } - cmd := NewIntCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -//------------------------------------------------------------------------------ - -func (c cmdable) GetBit(ctx context.Context, key string, offset int64) *IntCmd { - cmd := NewIntCmd(ctx, "getbit", key, offset) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) SetBit(ctx context.Context, key string, offset int64, value int) *IntCmd { - cmd := NewIntCmd( - ctx, - "setbit", - key, - offset, - value, - ) - _ = c(ctx, cmd) - return cmd -} - -type BitCount struct { - Start, End int64 -} - -func (c cmdable) BitCount(ctx context.Context, key string, bitCount *BitCount) *IntCmd { - args := []interface{}{"bitcount", key} - if bitCount != nil { - args = append( - args, - bitCount.Start, - bitCount.End, - ) - } - cmd := NewIntCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) bitOp(ctx context.Context, op, destKey string, keys ...string) *IntCmd { - args := make([]interface{}, 3+len(keys)) - args[0] = "bitop" - args[1] = op - args[2] = destKey - for i, key := range keys { - args[3+i] = key - } - cmd := NewIntCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) BitOpAnd(ctx context.Context, destKey string, keys ...string) *IntCmd { - return c.bitOp(ctx, "and", destKey, keys...) -} - -func (c cmdable) BitOpOr(ctx context.Context, destKey string, keys ...string) *IntCmd { - return c.bitOp(ctx, "or", destKey, keys...) -} - -func (c cmdable) BitOpXor(ctx context.Context, destKey string, keys ...string) *IntCmd { - return c.bitOp(ctx, "xor", destKey, keys...) -} - -func (c cmdable) BitOpNot(ctx context.Context, destKey string, key string) *IntCmd { - return c.bitOp(ctx, "not", destKey, key) -} - -func (c cmdable) BitPos(ctx context.Context, key string, bit int64, pos ...int64) *IntCmd { - args := make([]interface{}, 3+len(pos)) - args[0] = "bitpos" - args[1] = key - args[2] = bit - switch len(pos) { - case 0: - case 1: - args[3] = pos[0] - case 2: - args[3] = pos[0] - args[4] = pos[1] - default: - panic("too many arguments") - } - cmd := NewIntCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) BitField(ctx context.Context, key string, args ...interface{}) *IntSliceCmd { - a := make([]interface{}, 0, 2+len(args)) - a = append(a, "bitfield") - a = append(a, key) - a = append(a, args...) - cmd := NewIntSliceCmd(ctx, a...) 
- _ = c(ctx, cmd) - return cmd -} - -//------------------------------------------------------------------------------ - -func (c cmdable) Scan(ctx context.Context, cursor uint64, match string, count int64) *ScanCmd { - args := []interface{}{"scan", cursor} - if match != "" { - args = append(args, "match", match) - } - if count > 0 { - args = append(args, "count", count) - } - cmd := NewScanCmd(ctx, c, args...) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) ScanType(ctx context.Context, cursor uint64, match string, count int64, keyType string) *ScanCmd { - args := []interface{}{"scan", cursor} - if match != "" { - args = append(args, "match", match) - } - if count > 0 { - args = append(args, "count", count) - } - if keyType != "" { - args = append(args, "type", keyType) - } - cmd := NewScanCmd(ctx, c, args...) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) SScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd { - args := []interface{}{"sscan", key, cursor} - if match != "" { - args = append(args, "match", match) - } - if count > 0 { - args = append(args, "count", count) - } - cmd := NewScanCmd(ctx, c, args...) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) HScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd { - args := []interface{}{"hscan", key, cursor} - if match != "" { - args = append(args, "match", match) - } - if count > 0 { - args = append(args, "count", count) - } - cmd := NewScanCmd(ctx, c, args...) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) ZScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd { - args := []interface{}{"zscan", key, cursor} - if match != "" { - args = append(args, "match", match) - } - if count > 0 { - args = append(args, "count", count) - } - cmd := NewScanCmd(ctx, c, args...) - _ = c(ctx, cmd) - return cmd -} - -//------------------------------------------------------------------------------ - -func (c cmdable) HDel(ctx context.Context, key string, fields ...string) *IntCmd { - args := make([]interface{}, 2+len(fields)) - args[0] = "hdel" - args[1] = key - for i, field := range fields { - args[2+i] = field - } - cmd := NewIntCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) HExists(ctx context.Context, key, field string) *BoolCmd { - cmd := NewBoolCmd(ctx, "hexists", key, field) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) HGet(ctx context.Context, key, field string) *StringCmd { - cmd := NewStringCmd(ctx, "hget", key, field) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) HGetAll(ctx context.Context, key string) *StringStringMapCmd { - cmd := NewStringStringMapCmd(ctx, "hgetall", key) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) HIncrBy(ctx context.Context, key, field string, incr int64) *IntCmd { - cmd := NewIntCmd(ctx, "hincrby", key, field, incr) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) HIncrByFloat(ctx context.Context, key, field string, incr float64) *FloatCmd { - cmd := NewFloatCmd(ctx, "hincrbyfloat", key, field, incr) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) HKeys(ctx context.Context, key string) *StringSliceCmd { - cmd := NewStringSliceCmd(ctx, "hkeys", key) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) HLen(ctx context.Context, key string) *IntCmd { - cmd := NewIntCmd(ctx, "hlen", key) - _ = c(ctx, cmd) - return cmd -} - -// HMGet returns the values for the specified fields in the hash stored at key. 
-// It returns an interface{} to distinguish between empty string and nil value. -func (c cmdable) HMGet(ctx context.Context, key string, fields ...string) *SliceCmd { - args := make([]interface{}, 2+len(fields)) - args[0] = "hmget" - args[1] = key - for i, field := range fields { - args[2+i] = field - } - cmd := NewSliceCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -// HSet accepts values in following formats: -// - HSet("myhash", "key1", "value1", "key2", "value2") -// - HSet("myhash", []string{"key1", "value1", "key2", "value2"}) -// - HSet("myhash", map[string]interface{}{"key1": "value1", "key2": "value2"}) -// -// Note that it requires Redis v4 for multiple field/value pairs support. -func (c cmdable) HSet(ctx context.Context, key string, values ...interface{}) *IntCmd { - args := make([]interface{}, 2, 2+len(values)) - args[0] = "hset" - args[1] = key - args = appendArgs(args, values) - cmd := NewIntCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -// HMSet is a deprecated version of HSet left for compatibility with Redis 3. -func (c cmdable) HMSet(ctx context.Context, key string, values ...interface{}) *BoolCmd { - args := make([]interface{}, 2, 2+len(values)) - args[0] = "hmset" - args[1] = key - args = appendArgs(args, values) - cmd := NewBoolCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) HSetNX(ctx context.Context, key, field string, value interface{}) *BoolCmd { - cmd := NewBoolCmd(ctx, "hsetnx", key, field, value) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) HVals(ctx context.Context, key string) *StringSliceCmd { - cmd := NewStringSliceCmd(ctx, "hvals", key) - _ = c(ctx, cmd) - return cmd -} - -// HRandField redis-server version >= 6.2.0. -func (c cmdable) HRandField(ctx context.Context, key string, count int, withValues bool) *StringSliceCmd { - args := make([]interface{}, 0, 4) - - // Although count=0 is meaningless, redis accepts count=0. - args = append(args, "hrandfield", key, count) - if withValues { - args = append(args, "withvalues") - } - - cmd := NewStringSliceCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -//------------------------------------------------------------------------------ - -func (c cmdable) BLPop(ctx context.Context, timeout time.Duration, keys ...string) *StringSliceCmd { - args := make([]interface{}, 1+len(keys)+1) - args[0] = "blpop" - for i, key := range keys { - args[1+i] = key - } - args[len(args)-1] = formatSec(ctx, timeout) - cmd := NewStringSliceCmd(ctx, args...) - cmd.setReadTimeout(timeout) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) BRPop(ctx context.Context, timeout time.Duration, keys ...string) *StringSliceCmd { - args := make([]interface{}, 1+len(keys)+1) - args[0] = "brpop" - for i, key := range keys { - args[1+i] = key - } - args[len(keys)+1] = formatSec(ctx, timeout) - cmd := NewStringSliceCmd(ctx, args...) 
- cmd.setReadTimeout(timeout) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) BRPopLPush(ctx context.Context, source, destination string, timeout time.Duration) *StringCmd { - cmd := NewStringCmd( - ctx, - "brpoplpush", - source, - destination, - formatSec(ctx, timeout), - ) - cmd.setReadTimeout(timeout) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) LIndex(ctx context.Context, key string, index int64) *StringCmd { - cmd := NewStringCmd(ctx, "lindex", key, index) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) LInsert(ctx context.Context, key, op string, pivot, value interface{}) *IntCmd { - cmd := NewIntCmd(ctx, "linsert", key, op, pivot, value) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) LInsertBefore(ctx context.Context, key string, pivot, value interface{}) *IntCmd { - cmd := NewIntCmd(ctx, "linsert", key, "before", pivot, value) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) LInsertAfter(ctx context.Context, key string, pivot, value interface{}) *IntCmd { - cmd := NewIntCmd(ctx, "linsert", key, "after", pivot, value) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) LLen(ctx context.Context, key string) *IntCmd { - cmd := NewIntCmd(ctx, "llen", key) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) LPop(ctx context.Context, key string) *StringCmd { - cmd := NewStringCmd(ctx, "lpop", key) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) LPopCount(ctx context.Context, key string, count int) *StringSliceCmd { - cmd := NewStringSliceCmd(ctx, "lpop", key, count) - _ = c(ctx, cmd) - return cmd -} - -type LPosArgs struct { - Rank, MaxLen int64 -} - -func (c cmdable) LPos(ctx context.Context, key string, value string, a LPosArgs) *IntCmd { - args := []interface{}{"lpos", key, value} - if a.Rank != 0 { - args = append(args, "rank", a.Rank) - } - if a.MaxLen != 0 { - args = append(args, "maxlen", a.MaxLen) - } - - cmd := NewIntCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) LPosCount(ctx context.Context, key string, value string, count int64, a LPosArgs) *IntSliceCmd { - args := []interface{}{"lpos", key, value, "count", count} - if a.Rank != 0 { - args = append(args, "rank", a.Rank) - } - if a.MaxLen != 0 { - args = append(args, "maxlen", a.MaxLen) - } - cmd := NewIntSliceCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) LPush(ctx context.Context, key string, values ...interface{}) *IntCmd { - args := make([]interface{}, 2, 2+len(values)) - args[0] = "lpush" - args[1] = key - args = appendArgs(args, values) - cmd := NewIntCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) LPushX(ctx context.Context, key string, values ...interface{}) *IntCmd { - args := make([]interface{}, 2, 2+len(values)) - args[0] = "lpushx" - args[1] = key - args = appendArgs(args, values) - cmd := NewIntCmd(ctx, args...) 
- _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) LRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd { - cmd := NewStringSliceCmd( - ctx, - "lrange", - key, - start, - stop, - ) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) LRem(ctx context.Context, key string, count int64, value interface{}) *IntCmd { - cmd := NewIntCmd(ctx, "lrem", key, count, value) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) LSet(ctx context.Context, key string, index int64, value interface{}) *StatusCmd { - cmd := NewStatusCmd(ctx, "lset", key, index, value) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) LTrim(ctx context.Context, key string, start, stop int64) *StatusCmd { - cmd := NewStatusCmd( - ctx, - "ltrim", - key, - start, - stop, - ) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) RPop(ctx context.Context, key string) *StringCmd { - cmd := NewStringCmd(ctx, "rpop", key) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) RPopCount(ctx context.Context, key string, count int) *StringSliceCmd { - cmd := NewStringSliceCmd(ctx, "rpop", key, count) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) RPopLPush(ctx context.Context, source, destination string) *StringCmd { - cmd := NewStringCmd(ctx, "rpoplpush", source, destination) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) RPush(ctx context.Context, key string, values ...interface{}) *IntCmd { - args := make([]interface{}, 2, 2+len(values)) - args[0] = "rpush" - args[1] = key - args = appendArgs(args, values) - cmd := NewIntCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) RPushX(ctx context.Context, key string, values ...interface{}) *IntCmd { - args := make([]interface{}, 2, 2+len(values)) - args[0] = "rpushx" - args[1] = key - args = appendArgs(args, values) - cmd := NewIntCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) LMove(ctx context.Context, source, destination, srcpos, destpos string) *StringCmd { - cmd := NewStringCmd(ctx, "lmove", source, destination, srcpos, destpos) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) BLMove( - ctx context.Context, source, destination, srcpos, destpos string, timeout time.Duration, -) *StringCmd { - cmd := NewStringCmd(ctx, "blmove", source, destination, srcpos, destpos, formatSec(ctx, timeout)) - cmd.setReadTimeout(timeout) - _ = c(ctx, cmd) - return cmd -} - -//------------------------------------------------------------------------------ - -func (c cmdable) SAdd(ctx context.Context, key string, members ...interface{}) *IntCmd { - args := make([]interface{}, 2, 2+len(members)) - args[0] = "sadd" - args[1] = key - args = appendArgs(args, members) - cmd := NewIntCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) SCard(ctx context.Context, key string) *IntCmd { - cmd := NewIntCmd(ctx, "scard", key) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) SDiff(ctx context.Context, keys ...string) *StringSliceCmd { - args := make([]interface{}, 1+len(keys)) - args[0] = "sdiff" - for i, key := range keys { - args[1+i] = key - } - cmd := NewStringSliceCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) SDiffStore(ctx context.Context, destination string, keys ...string) *IntCmd { - args := make([]interface{}, 2+len(keys)) - args[0] = "sdiffstore" - args[1] = destination - for i, key := range keys { - args[2+i] = key - } - cmd := NewIntCmd(ctx, args...) 
- _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) SInter(ctx context.Context, keys ...string) *StringSliceCmd { - args := make([]interface{}, 1+len(keys)) - args[0] = "sinter" - for i, key := range keys { - args[1+i] = key - } - cmd := NewStringSliceCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) SInterStore(ctx context.Context, destination string, keys ...string) *IntCmd { - args := make([]interface{}, 2+len(keys)) - args[0] = "sinterstore" - args[1] = destination - for i, key := range keys { - args[2+i] = key - } - cmd := NewIntCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) SIsMember(ctx context.Context, key string, member interface{}) *BoolCmd { - cmd := NewBoolCmd(ctx, "sismember", key, member) - _ = c(ctx, cmd) - return cmd -} - -// SMIsMember Redis `SMISMEMBER key member [member ...]` command. -func (c cmdable) SMIsMember(ctx context.Context, key string, members ...interface{}) *BoolSliceCmd { - args := make([]interface{}, 2, 2+len(members)) - args[0] = "smismember" - args[1] = key - args = appendArgs(args, members) - cmd := NewBoolSliceCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -// SMembers Redis `SMEMBERS key` command output as a slice. -func (c cmdable) SMembers(ctx context.Context, key string) *StringSliceCmd { - cmd := NewStringSliceCmd(ctx, "smembers", key) - _ = c(ctx, cmd) - return cmd -} - -// SMembersMap Redis `SMEMBERS key` command output as a map. -func (c cmdable) SMembersMap(ctx context.Context, key string) *StringStructMapCmd { - cmd := NewStringStructMapCmd(ctx, "smembers", key) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) SMove(ctx context.Context, source, destination string, member interface{}) *BoolCmd { - cmd := NewBoolCmd(ctx, "smove", source, destination, member) - _ = c(ctx, cmd) - return cmd -} - -// SPop Redis `SPOP key` command. -func (c cmdable) SPop(ctx context.Context, key string) *StringCmd { - cmd := NewStringCmd(ctx, "spop", key) - _ = c(ctx, cmd) - return cmd -} - -// SPopN Redis `SPOP key count` command. -func (c cmdable) SPopN(ctx context.Context, key string, count int64) *StringSliceCmd { - cmd := NewStringSliceCmd(ctx, "spop", key, count) - _ = c(ctx, cmd) - return cmd -} - -// SRandMember Redis `SRANDMEMBER key` command. -func (c cmdable) SRandMember(ctx context.Context, key string) *StringCmd { - cmd := NewStringCmd(ctx, "srandmember", key) - _ = c(ctx, cmd) - return cmd -} - -// SRandMemberN Redis `SRANDMEMBER key count` command. -func (c cmdable) SRandMemberN(ctx context.Context, key string, count int64) *StringSliceCmd { - cmd := NewStringSliceCmd(ctx, "srandmember", key, count) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) SRem(ctx context.Context, key string, members ...interface{}) *IntCmd { - args := make([]interface{}, 2, 2+len(members)) - args[0] = "srem" - args[1] = key - args = appendArgs(args, members) - cmd := NewIntCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) SUnion(ctx context.Context, keys ...string) *StringSliceCmd { - args := make([]interface{}, 1+len(keys)) - args[0] = "sunion" - for i, key := range keys { - args[1+i] = key - } - cmd := NewStringSliceCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) SUnionStore(ctx context.Context, destination string, keys ...string) *IntCmd { - args := make([]interface{}, 2+len(keys)) - args[0] = "sunionstore" - args[1] = destination - for i, key := range keys { - args[2+i] = key - } - cmd := NewIntCmd(ctx, args...) 
- _ = c(ctx, cmd) - return cmd -} - -//------------------------------------------------------------------------------ - -// XAddArgs accepts values in the following formats: -// - XAddArgs.Values = []interface{}{"key1", "value1", "key2", "value2"} -// - XAddArgs.Values = []string("key1", "value1", "key2", "value2") -// - XAddArgs.Values = map[string]interface{}{"key1": "value1", "key2": "value2"} -// -// Note that map will not preserve the order of key-value pairs. -// MaxLen/MaxLenApprox and MinID are in conflict, only one of them can be used. -type XAddArgs struct { - Stream string - NoMkStream bool - MaxLen int64 // MAXLEN N - - // Deprecated: use MaxLen+Approx, remove in v9. - MaxLenApprox int64 // MAXLEN ~ N - - MinID string - // Approx causes MaxLen and MinID to use "~" matcher (instead of "="). - Approx bool - Limit int64 - ID string - Values interface{} -} - -// XAdd a.Limit has a bug, please confirm it and use it. -// issue: https://github.com/redis/redis/issues/9046 -func (c cmdable) XAdd(ctx context.Context, a *XAddArgs) *StringCmd { - args := make([]interface{}, 0, 11) - args = append(args, "xadd", a.Stream) - if a.NoMkStream { - args = append(args, "nomkstream") - } - switch { - case a.MaxLen > 0: - if a.Approx { - args = append(args, "maxlen", "~", a.MaxLen) - } else { - args = append(args, "maxlen", a.MaxLen) - } - case a.MaxLenApprox > 0: - // TODO remove in v9. - args = append(args, "maxlen", "~", a.MaxLenApprox) - case a.MinID != "": - if a.Approx { - args = append(args, "minid", "~", a.MinID) - } else { - args = append(args, "minid", a.MinID) - } - } - if a.Limit > 0 { - args = append(args, "limit", a.Limit) - } - if a.ID != "" { - args = append(args, a.ID) - } else { - args = append(args, "*") - } - args = appendArg(args, a.Values) - - cmd := NewStringCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) XDel(ctx context.Context, stream string, ids ...string) *IntCmd { - args := []interface{}{"xdel", stream} - for _, id := range ids { - args = append(args, id) - } - cmd := NewIntCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) XLen(ctx context.Context, stream string) *IntCmd { - cmd := NewIntCmd(ctx, "xlen", stream) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) XRange(ctx context.Context, stream, start, stop string) *XMessageSliceCmd { - cmd := NewXMessageSliceCmd(ctx, "xrange", stream, start, stop) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) XRangeN(ctx context.Context, stream, start, stop string, count int64) *XMessageSliceCmd { - cmd := NewXMessageSliceCmd(ctx, "xrange", stream, start, stop, "count", count) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) XRevRange(ctx context.Context, stream, start, stop string) *XMessageSliceCmd { - cmd := NewXMessageSliceCmd(ctx, "xrevrange", stream, start, stop) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) XRevRangeN(ctx context.Context, stream, start, stop string, count int64) *XMessageSliceCmd { - cmd := NewXMessageSliceCmd(ctx, "xrevrange", stream, start, stop, "count", count) - _ = c(ctx, cmd) - return cmd -} - -type XReadArgs struct { - Streams []string // list of streams and ids, e.g. 
stream1 stream2 id1 id2 - Count int64 - Block time.Duration -} - -func (c cmdable) XRead(ctx context.Context, a *XReadArgs) *XStreamSliceCmd { - args := make([]interface{}, 0, 6+len(a.Streams)) - args = append(args, "xread") - - keyPos := int8(1) - if a.Count > 0 { - args = append(args, "count") - args = append(args, a.Count) - keyPos += 2 - } - if a.Block >= 0 { - args = append(args, "block") - args = append(args, int64(a.Block/time.Millisecond)) - keyPos += 2 - } - args = append(args, "streams") - keyPos++ - for _, s := range a.Streams { - args = append(args, s) - } - - cmd := NewXStreamSliceCmd(ctx, args...) - if a.Block >= 0 { - cmd.setReadTimeout(a.Block) - } - cmd.SetFirstKeyPos(keyPos) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) XReadStreams(ctx context.Context, streams ...string) *XStreamSliceCmd { - return c.XRead(ctx, &XReadArgs{ - Streams: streams, - Block: -1, - }) -} - -func (c cmdable) XGroupCreate(ctx context.Context, stream, group, start string) *StatusCmd { - cmd := NewStatusCmd(ctx, "xgroup", "create", stream, group, start) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) XGroupCreateMkStream(ctx context.Context, stream, group, start string) *StatusCmd { - cmd := NewStatusCmd(ctx, "xgroup", "create", stream, group, start, "mkstream") - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) XGroupSetID(ctx context.Context, stream, group, start string) *StatusCmd { - cmd := NewStatusCmd(ctx, "xgroup", "setid", stream, group, start) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) XGroupDestroy(ctx context.Context, stream, group string) *IntCmd { - cmd := NewIntCmd(ctx, "xgroup", "destroy", stream, group) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) XGroupCreateConsumer(ctx context.Context, stream, group, consumer string) *IntCmd { - cmd := NewIntCmd(ctx, "xgroup", "createconsumer", stream, group, consumer) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) XGroupDelConsumer(ctx context.Context, stream, group, consumer string) *IntCmd { - cmd := NewIntCmd(ctx, "xgroup", "delconsumer", stream, group, consumer) - _ = c(ctx, cmd) - return cmd -} - -type XReadGroupArgs struct { - Group string - Consumer string - Streams []string // list of streams and ids, e.g. stream1 stream2 id1 id2 - Count int64 - Block time.Duration - NoAck bool -} - -func (c cmdable) XReadGroup(ctx context.Context, a *XReadGroupArgs) *XStreamSliceCmd { - args := make([]interface{}, 0, 10+len(a.Streams)) - args = append(args, "xreadgroup", "group", a.Group, a.Consumer) - - keyPos := int8(4) - if a.Count > 0 { - args = append(args, "count", a.Count) - keyPos += 2 - } - if a.Block >= 0 { - args = append(args, "block", int64(a.Block/time.Millisecond)) - keyPos += 2 - } - if a.NoAck { - args = append(args, "noack") - keyPos++ - } - args = append(args, "streams") - keyPos++ - for _, s := range a.Streams { - args = append(args, s) - } - - cmd := NewXStreamSliceCmd(ctx, args...) - if a.Block >= 0 { - cmd.setReadTimeout(a.Block) - } - cmd.SetFirstKeyPos(keyPos) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) XAck(ctx context.Context, stream, group string, ids ...string) *IntCmd { - args := []interface{}{"xack", stream, group} - for _, id := range ids { - args = append(args, id) - } - cmd := NewIntCmd(ctx, args...) 
- _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) XPending(ctx context.Context, stream, group string) *XPendingCmd { - cmd := NewXPendingCmd(ctx, "xpending", stream, group) - _ = c(ctx, cmd) - return cmd -} - -type XPendingExtArgs struct { - Stream string - Group string - Idle time.Duration - Start string - End string - Count int64 - Consumer string -} - -func (c cmdable) XPendingExt(ctx context.Context, a *XPendingExtArgs) *XPendingExtCmd { - args := make([]interface{}, 0, 9) - args = append(args, "xpending", a.Stream, a.Group) - if a.Idle != 0 { - args = append(args, "idle", formatMs(ctx, a.Idle)) - } - args = append(args, a.Start, a.End, a.Count) - if a.Consumer != "" { - args = append(args, a.Consumer) - } - cmd := NewXPendingExtCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -type XAutoClaimArgs struct { - Stream string - Group string - MinIdle time.Duration - Start string - Count int64 - Consumer string -} - -func (c cmdable) XAutoClaim(ctx context.Context, a *XAutoClaimArgs) *XAutoClaimCmd { - args := xAutoClaimArgs(ctx, a) - cmd := NewXAutoClaimCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) XAutoClaimJustID(ctx context.Context, a *XAutoClaimArgs) *XAutoClaimJustIDCmd { - args := xAutoClaimArgs(ctx, a) - args = append(args, "justid") - cmd := NewXAutoClaimJustIDCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -func xAutoClaimArgs(ctx context.Context, a *XAutoClaimArgs) []interface{} { - args := make([]interface{}, 0, 8) - args = append(args, "xautoclaim", a.Stream, a.Group, a.Consumer, formatMs(ctx, a.MinIdle), a.Start) - if a.Count > 0 { - args = append(args, "count", a.Count) - } - return args -} - -type XClaimArgs struct { - Stream string - Group string - Consumer string - MinIdle time.Duration - Messages []string -} - -func (c cmdable) XClaim(ctx context.Context, a *XClaimArgs) *XMessageSliceCmd { - args := xClaimArgs(a) - cmd := NewXMessageSliceCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) XClaimJustID(ctx context.Context, a *XClaimArgs) *StringSliceCmd { - args := xClaimArgs(a) - args = append(args, "justid") - cmd := NewStringSliceCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -func xClaimArgs(a *XClaimArgs) []interface{} { - args := make([]interface{}, 0, 5+len(a.Messages)) - args = append(args, - "xclaim", - a.Stream, - a.Group, a.Consumer, - int64(a.MinIdle/time.Millisecond)) - for _, id := range a.Messages { - args = append(args, id) - } - return args -} - -// xTrim If approx is true, add the "~" parameter, otherwise it is the default "=" (redis default). -// example: -// XTRIM key MAXLEN/MINID threshold LIMIT limit. -// XTRIM key MAXLEN/MINID ~ threshold LIMIT limit. -// The redis-server version is lower than 6.2, please set limit to 0. -func (c cmdable) xTrim( - ctx context.Context, key, strategy string, - approx bool, threshold interface{}, limit int64, -) *IntCmd { - args := make([]interface{}, 0, 7) - args = append(args, "xtrim", key, strategy) - if approx { - args = append(args, "~") - } - args = append(args, threshold) - if limit > 0 { - args = append(args, "limit", limit) - } - cmd := NewIntCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -// Deprecated: use XTrimMaxLen, remove in v9. -func (c cmdable) XTrim(ctx context.Context, key string, maxLen int64) *IntCmd { - return c.xTrim(ctx, key, "maxlen", false, maxLen, 0) -} - -// Deprecated: use XTrimMaxLenApprox, remove in v9. 
-func (c cmdable) XTrimApprox(ctx context.Context, key string, maxLen int64) *IntCmd { - return c.xTrim(ctx, key, "maxlen", true, maxLen, 0) -} - -// XTrimMaxLen No `~` rules are used, `limit` cannot be used. -// cmd: XTRIM key MAXLEN maxLen -func (c cmdable) XTrimMaxLen(ctx context.Context, key string, maxLen int64) *IntCmd { - return c.xTrim(ctx, key, "maxlen", false, maxLen, 0) -} - -// XTrimMaxLenApprox LIMIT has a bug, please confirm it and use it. -// issue: https://github.com/redis/redis/issues/9046 -// cmd: XTRIM key MAXLEN ~ maxLen LIMIT limit -func (c cmdable) XTrimMaxLenApprox(ctx context.Context, key string, maxLen, limit int64) *IntCmd { - return c.xTrim(ctx, key, "maxlen", true, maxLen, limit) -} - -// XTrimMinID No `~` rules are used, `limit` cannot be used. -// cmd: XTRIM key MINID minID -func (c cmdable) XTrimMinID(ctx context.Context, key string, minID string) *IntCmd { - return c.xTrim(ctx, key, "minid", false, minID, 0) -} - -// XTrimMinIDApprox LIMIT has a bug, please confirm it and use it. -// issue: https://github.com/redis/redis/issues/9046 -// cmd: XTRIM key MINID ~ minID LIMIT limit -func (c cmdable) XTrimMinIDApprox(ctx context.Context, key string, minID string, limit int64) *IntCmd { - return c.xTrim(ctx, key, "minid", true, minID, limit) -} - -func (c cmdable) XInfoConsumers(ctx context.Context, key string, group string) *XInfoConsumersCmd { - cmd := NewXInfoConsumersCmd(ctx, key, group) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) XInfoGroups(ctx context.Context, key string) *XInfoGroupsCmd { - cmd := NewXInfoGroupsCmd(ctx, key) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) XInfoStream(ctx context.Context, key string) *XInfoStreamCmd { - cmd := NewXInfoStreamCmd(ctx, key) - _ = c(ctx, cmd) - return cmd -} - -// XInfoStreamFull XINFO STREAM FULL [COUNT count] -// redis-server >= 6.0. -func (c cmdable) XInfoStreamFull(ctx context.Context, key string, count int) *XInfoStreamFullCmd { - args := make([]interface{}, 0, 6) - args = append(args, "xinfo", "stream", key, "full") - if count > 0 { - args = append(args, "count", count) - } - cmd := NewXInfoStreamFullCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -//------------------------------------------------------------------------------ - -// Z represents sorted set member. -type Z struct { - Score float64 - Member interface{} -} - -// ZWithKey represents sorted set member including the name of the key where it was popped. -type ZWithKey struct { - Z - Key string -} - -// ZStore is used as an arg to ZInter/ZInterStore and ZUnion/ZUnionStore. -type ZStore struct { - Keys []string - Weights []float64 - // Can be SUM, MIN or MAX. - Aggregate string -} - -func (z ZStore) len() (n int) { - n = len(z.Keys) - if len(z.Weights) > 0 { - n += 1 + len(z.Weights) - } - if z.Aggregate != "" { - n += 2 - } - return n -} - -func (z ZStore) appendArgs(args []interface{}) []interface{} { - for _, key := range z.Keys { - args = append(args, key) - } - if len(z.Weights) > 0 { - args = append(args, "weights") - for _, weights := range z.Weights { - args = append(args, weights) - } - } - if z.Aggregate != "" { - args = append(args, "aggregate", z.Aggregate) - } - return args -} - -// BZPopMax Redis `BZPOPMAX key [key ...] timeout` command. 
-func (c cmdable) BZPopMax(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd { - args := make([]interface{}, 1+len(keys)+1) - args[0] = "bzpopmax" - for i, key := range keys { - args[1+i] = key - } - args[len(args)-1] = formatSec(ctx, timeout) - cmd := NewZWithKeyCmd(ctx, args...) - cmd.setReadTimeout(timeout) - _ = c(ctx, cmd) - return cmd -} - -// BZPopMin Redis `BZPOPMIN key [key ...] timeout` command. -func (c cmdable) BZPopMin(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd { - args := make([]interface{}, 1+len(keys)+1) - args[0] = "bzpopmin" - for i, key := range keys { - args[1+i] = key - } - args[len(args)-1] = formatSec(ctx, timeout) - cmd := NewZWithKeyCmd(ctx, args...) - cmd.setReadTimeout(timeout) - _ = c(ctx, cmd) - return cmd -} - -// ZAddArgs WARN: The GT, LT and NX options are mutually exclusive. -type ZAddArgs struct { - NX bool - XX bool - LT bool - GT bool - Ch bool - Members []Z -} - -func (c cmdable) zAddArgs(key string, args ZAddArgs, incr bool) []interface{} { - a := make([]interface{}, 0, 6+2*len(args.Members)) - a = append(a, "zadd", key) - - // The GT, LT and NX options are mutually exclusive. - if args.NX { - a = append(a, "nx") - } else { - if args.XX { - a = append(a, "xx") - } - if args.GT { - a = append(a, "gt") - } else if args.LT { - a = append(a, "lt") - } - } - if args.Ch { - a = append(a, "ch") - } - if incr { - a = append(a, "incr") - } - for _, m := range args.Members { - a = append(a, m.Score) - a = append(a, m.Member) - } - return a -} - -func (c cmdable) ZAddArgs(ctx context.Context, key string, args ZAddArgs) *IntCmd { - cmd := NewIntCmd(ctx, c.zAddArgs(key, args, false)...) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) ZAddArgsIncr(ctx context.Context, key string, args ZAddArgs) *FloatCmd { - cmd := NewFloatCmd(ctx, c.zAddArgs(key, args, true)...) - _ = c(ctx, cmd) - return cmd -} - -// TODO: Compatible with v8 api, will be removed in v9. -func (c cmdable) zAdd(ctx context.Context, key string, args ZAddArgs, members ...*Z) *IntCmd { - args.Members = make([]Z, len(members)) - for i, m := range members { - args.Members[i] = *m - } - cmd := NewIntCmd(ctx, c.zAddArgs(key, args, false)...) - _ = c(ctx, cmd) - return cmd -} - -// ZAdd Redis `ZADD key score member [score member ...]` command. -func (c cmdable) ZAdd(ctx context.Context, key string, members ...*Z) *IntCmd { - return c.zAdd(ctx, key, ZAddArgs{}, members...) -} - -// ZAddNX Redis `ZADD key NX score member [score member ...]` command. -func (c cmdable) ZAddNX(ctx context.Context, key string, members ...*Z) *IntCmd { - return c.zAdd(ctx, key, ZAddArgs{ - NX: true, - }, members...) -} - -// ZAddXX Redis `ZADD key XX score member [score member ...]` command. -func (c cmdable) ZAddXX(ctx context.Context, key string, members ...*Z) *IntCmd { - return c.zAdd(ctx, key, ZAddArgs{ - XX: true, - }, members...) -} - -// ZAddCh Redis `ZADD key CH score member [score member ...]` command. -// Deprecated: Use -// client.ZAddArgs(ctx, ZAddArgs{ -// Ch: true, -// Members: []Z, -// }) -// remove in v9. -func (c cmdable) ZAddCh(ctx context.Context, key string, members ...*Z) *IntCmd { - return c.zAdd(ctx, key, ZAddArgs{ - Ch: true, - }, members...) -} - -// ZAddNXCh Redis `ZADD key NX CH score member [score member ...]` command. -// Deprecated: Use -// client.ZAddArgs(ctx, ZAddArgs{ -// NX: true, -// Ch: true, -// Members: []Z, -// }) -// remove in v9. 
-func (c cmdable) ZAddNXCh(ctx context.Context, key string, members ...*Z) *IntCmd { - return c.zAdd(ctx, key, ZAddArgs{ - NX: true, - Ch: true, - }, members...) -} - -// ZAddXXCh Redis `ZADD key XX CH score member [score member ...]` command. -// Deprecated: Use -// client.ZAddArgs(ctx, ZAddArgs{ -// XX: true, -// Ch: true, -// Members: []Z, -// }) -// remove in v9. -func (c cmdable) ZAddXXCh(ctx context.Context, key string, members ...*Z) *IntCmd { - return c.zAdd(ctx, key, ZAddArgs{ - XX: true, - Ch: true, - }, members...) -} - -// ZIncr Redis `ZADD key INCR score member` command. -// Deprecated: Use -// client.ZAddArgsIncr(ctx, ZAddArgs{ -// Members: []Z, -// }) -// remove in v9. -func (c cmdable) ZIncr(ctx context.Context, key string, member *Z) *FloatCmd { - return c.ZAddArgsIncr(ctx, key, ZAddArgs{ - Members: []Z{*member}, - }) -} - -// ZIncrNX Redis `ZADD key NX INCR score member` command. -// Deprecated: Use -// client.ZAddArgsIncr(ctx, ZAddArgs{ -// NX: true, -// Members: []Z, -// }) -// remove in v9. -func (c cmdable) ZIncrNX(ctx context.Context, key string, member *Z) *FloatCmd { - return c.ZAddArgsIncr(ctx, key, ZAddArgs{ - NX: true, - Members: []Z{*member}, - }) -} - -// ZIncrXX Redis `ZADD key XX INCR score member` command. -// Deprecated: Use -// client.ZAddArgsIncr(ctx, ZAddArgs{ -// XX: true, -// Members: []Z, -// }) -// remove in v9. -func (c cmdable) ZIncrXX(ctx context.Context, key string, member *Z) *FloatCmd { - return c.ZAddArgsIncr(ctx, key, ZAddArgs{ - XX: true, - Members: []Z{*member}, - }) -} - -func (c cmdable) ZCard(ctx context.Context, key string) *IntCmd { - cmd := NewIntCmd(ctx, "zcard", key) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) ZCount(ctx context.Context, key, min, max string) *IntCmd { - cmd := NewIntCmd(ctx, "zcount", key, min, max) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) ZLexCount(ctx context.Context, key, min, max string) *IntCmd { - cmd := NewIntCmd(ctx, "zlexcount", key, min, max) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) ZIncrBy(ctx context.Context, key string, increment float64, member string) *FloatCmd { - cmd := NewFloatCmd(ctx, "zincrby", key, increment, member) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) ZInterStore(ctx context.Context, destination string, store *ZStore) *IntCmd { - args := make([]interface{}, 0, 3+store.len()) - args = append(args, "zinterstore", destination, len(store.Keys)) - args = store.appendArgs(args) - cmd := NewIntCmd(ctx, args...) - cmd.SetFirstKeyPos(3) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) ZInter(ctx context.Context, store *ZStore) *StringSliceCmd { - args := make([]interface{}, 0, 2+store.len()) - args = append(args, "zinter", len(store.Keys)) - args = store.appendArgs(args) - cmd := NewStringSliceCmd(ctx, args...) - cmd.SetFirstKeyPos(2) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) ZInterWithScores(ctx context.Context, store *ZStore) *ZSliceCmd { - args := make([]interface{}, 0, 3+store.len()) - args = append(args, "zinter", len(store.Keys)) - args = store.appendArgs(args) - args = append(args, "withscores") - cmd := NewZSliceCmd(ctx, args...) - cmd.SetFirstKeyPos(2) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) ZMScore(ctx context.Context, key string, members ...string) *FloatSliceCmd { - args := make([]interface{}, 2+len(members)) - args[0] = "zmscore" - args[1] = key - for i, member := range members { - args[2+i] = member - } - cmd := NewFloatSliceCmd(ctx, args...) 
- _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) ZPopMax(ctx context.Context, key string, count ...int64) *ZSliceCmd { - args := []interface{}{ - "zpopmax", - key, - } - - switch len(count) { - case 0: - break - case 1: - args = append(args, count[0]) - default: - panic("too many arguments") - } - - cmd := NewZSliceCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) ZPopMin(ctx context.Context, key string, count ...int64) *ZSliceCmd { - args := []interface{}{ - "zpopmin", - key, - } - - switch len(count) { - case 0: - break - case 1: - args = append(args, count[0]) - default: - panic("too many arguments") - } - - cmd := NewZSliceCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -// ZRangeArgs is all the options of the ZRange command. -// In version> 6.2.0, you can replace the(cmd): -// ZREVRANGE, -// ZRANGEBYSCORE, -// ZREVRANGEBYSCORE, -// ZRANGEBYLEX, -// ZREVRANGEBYLEX. -// Please pay attention to your redis-server version. -// -// Rev, ByScore, ByLex and Offset+Count options require redis-server 6.2.0 and higher. -type ZRangeArgs struct { - Key string - - // When the ByScore option is provided, the open interval(exclusive) can be set. - // By default, the score intervals specified by and are closed (inclusive). - // It is similar to the deprecated(6.2.0+) ZRangeByScore command. - // For example: - // ZRangeArgs{ - // Key: "example-key", - // Start: "(3", - // Stop: 8, - // ByScore: true, - // } - // cmd: "ZRange example-key (3 8 ByScore" (3 < score <= 8). - // - // For the ByLex option, it is similar to the deprecated(6.2.0+) ZRangeByLex command. - // You can set the and options as follows: - // ZRangeArgs{ - // Key: "example-key", - // Start: "[abc", - // Stop: "(def", - // ByLex: true, - // } - // cmd: "ZRange example-key [abc (def ByLex" - // - // For normal cases (ByScore==false && ByLex==false), and should be set to the index range (int). - // You can read the documentation for more information: https://redis.io/commands/zrange - Start interface{} - Stop interface{} - - // The ByScore and ByLex options are mutually exclusive. - ByScore bool - ByLex bool - - Rev bool - - // limit offset count. - Offset int64 - Count int64 -} - -func (z ZRangeArgs) appendArgs(args []interface{}) []interface{} { - // For Rev+ByScore/ByLex, we need to adjust the position of and . - if z.Rev && (z.ByScore || z.ByLex) { - args = append(args, z.Key, z.Stop, z.Start) - } else { - args = append(args, z.Key, z.Start, z.Stop) - } - - if z.ByScore { - args = append(args, "byscore") - } else if z.ByLex { - args = append(args, "bylex") - } - if z.Rev { - args = append(args, "rev") - } - if z.Offset != 0 || z.Count != 0 { - args = append(args, "limit", z.Offset, z.Count) - } - return args -} - -func (c cmdable) ZRangeArgs(ctx context.Context, z ZRangeArgs) *StringSliceCmd { - args := make([]interface{}, 0, 9) - args = append(args, "zrange") - args = z.appendArgs(args) - cmd := NewStringSliceCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) ZRangeArgsWithScores(ctx context.Context, z ZRangeArgs) *ZSliceCmd { - args := make([]interface{}, 0, 10) - args = append(args, "zrange") - args = z.appendArgs(args) - args = append(args, "withscores") - cmd := NewZSliceCmd(ctx, args...) 
- _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) ZRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd { - return c.ZRangeArgs(ctx, ZRangeArgs{ - Key: key, - Start: start, - Stop: stop, - }) -} - -func (c cmdable) ZRangeWithScores(ctx context.Context, key string, start, stop int64) *ZSliceCmd { - return c.ZRangeArgsWithScores(ctx, ZRangeArgs{ - Key: key, - Start: start, - Stop: stop, - }) -} - -type ZRangeBy struct { - Min, Max string - Offset, Count int64 -} - -func (c cmdable) zRangeBy(ctx context.Context, zcmd, key string, opt *ZRangeBy, withScores bool) *StringSliceCmd { - args := []interface{}{zcmd, key, opt.Min, opt.Max} - if withScores { - args = append(args, "withscores") - } - if opt.Offset != 0 || opt.Count != 0 { - args = append( - args, - "limit", - opt.Offset, - opt.Count, - ) - } - cmd := NewStringSliceCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) ZRangeByScore(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd { - return c.zRangeBy(ctx, "zrangebyscore", key, opt, false) -} - -func (c cmdable) ZRangeByLex(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd { - return c.zRangeBy(ctx, "zrangebylex", key, opt, false) -} - -func (c cmdable) ZRangeByScoreWithScores(ctx context.Context, key string, opt *ZRangeBy) *ZSliceCmd { - args := []interface{}{"zrangebyscore", key, opt.Min, opt.Max, "withscores"} - if opt.Offset != 0 || opt.Count != 0 { - args = append( - args, - "limit", - opt.Offset, - opt.Count, - ) - } - cmd := NewZSliceCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) ZRangeStore(ctx context.Context, dst string, z ZRangeArgs) *IntCmd { - args := make([]interface{}, 0, 10) - args = append(args, "zrangestore", dst) - args = z.appendArgs(args) - cmd := NewIntCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) ZRank(ctx context.Context, key, member string) *IntCmd { - cmd := NewIntCmd(ctx, "zrank", key, member) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) ZRem(ctx context.Context, key string, members ...interface{}) *IntCmd { - args := make([]interface{}, 2, 2+len(members)) - args[0] = "zrem" - args[1] = key - args = appendArgs(args, members) - cmd := NewIntCmd(ctx, args...) 
- _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) ZRemRangeByRank(ctx context.Context, key string, start, stop int64) *IntCmd { - cmd := NewIntCmd( - ctx, - "zremrangebyrank", - key, - start, - stop, - ) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) ZRemRangeByScore(ctx context.Context, key, min, max string) *IntCmd { - cmd := NewIntCmd(ctx, "zremrangebyscore", key, min, max) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) ZRemRangeByLex(ctx context.Context, key, min, max string) *IntCmd { - cmd := NewIntCmd(ctx, "zremrangebylex", key, min, max) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) ZRevRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd { - cmd := NewStringSliceCmd(ctx, "zrevrange", key, start, stop) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) ZRevRangeWithScores(ctx context.Context, key string, start, stop int64) *ZSliceCmd { - cmd := NewZSliceCmd(ctx, "zrevrange", key, start, stop, "withscores") - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) zRevRangeBy(ctx context.Context, zcmd, key string, opt *ZRangeBy) *StringSliceCmd { - args := []interface{}{zcmd, key, opt.Max, opt.Min} - if opt.Offset != 0 || opt.Count != 0 { - args = append( - args, - "limit", - opt.Offset, - opt.Count, - ) - } - cmd := NewStringSliceCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) ZRevRangeByScore(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd { - return c.zRevRangeBy(ctx, "zrevrangebyscore", key, opt) -} - -func (c cmdable) ZRevRangeByLex(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd { - return c.zRevRangeBy(ctx, "zrevrangebylex", key, opt) -} - -func (c cmdable) ZRevRangeByScoreWithScores(ctx context.Context, key string, opt *ZRangeBy) *ZSliceCmd { - args := []interface{}{"zrevrangebyscore", key, opt.Max, opt.Min, "withscores"} - if opt.Offset != 0 || opt.Count != 0 { - args = append( - args, - "limit", - opt.Offset, - opt.Count, - ) - } - cmd := NewZSliceCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) ZRevRank(ctx context.Context, key, member string) *IntCmd { - cmd := NewIntCmd(ctx, "zrevrank", key, member) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) ZScore(ctx context.Context, key, member string) *FloatCmd { - cmd := NewFloatCmd(ctx, "zscore", key, member) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) ZUnion(ctx context.Context, store ZStore) *StringSliceCmd { - args := make([]interface{}, 0, 2+store.len()) - args = append(args, "zunion", len(store.Keys)) - args = store.appendArgs(args) - cmd := NewStringSliceCmd(ctx, args...) - cmd.SetFirstKeyPos(2) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) ZUnionWithScores(ctx context.Context, store ZStore) *ZSliceCmd { - args := make([]interface{}, 0, 3+store.len()) - args = append(args, "zunion", len(store.Keys)) - args = store.appendArgs(args) - args = append(args, "withscores") - cmd := NewZSliceCmd(ctx, args...) - cmd.SetFirstKeyPos(2) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) ZUnionStore(ctx context.Context, dest string, store *ZStore) *IntCmd { - args := make([]interface{}, 0, 3+store.len()) - args = append(args, "zunionstore", dest, len(store.Keys)) - args = store.appendArgs(args) - cmd := NewIntCmd(ctx, args...) - cmd.SetFirstKeyPos(3) - _ = c(ctx, cmd) - return cmd -} - -// ZRandMember redis-server version >= 6.2.0. 
-func (c cmdable) ZRandMember(ctx context.Context, key string, count int, withScores bool) *StringSliceCmd { - args := make([]interface{}, 0, 4) - - // Although count=0 is meaningless, redis accepts count=0. - args = append(args, "zrandmember", key, count) - if withScores { - args = append(args, "withscores") - } - - cmd := NewStringSliceCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -// ZDiff redis-server version >= 6.2.0. -func (c cmdable) ZDiff(ctx context.Context, keys ...string) *StringSliceCmd { - args := make([]interface{}, 2+len(keys)) - args[0] = "zdiff" - args[1] = len(keys) - for i, key := range keys { - args[i+2] = key - } - - cmd := NewStringSliceCmd(ctx, args...) - cmd.SetFirstKeyPos(2) - _ = c(ctx, cmd) - return cmd -} - -// ZDiffWithScores redis-server version >= 6.2.0. -func (c cmdable) ZDiffWithScores(ctx context.Context, keys ...string) *ZSliceCmd { - args := make([]interface{}, 3+len(keys)) - args[0] = "zdiff" - args[1] = len(keys) - for i, key := range keys { - args[i+2] = key - } - args[len(keys)+2] = "withscores" - - cmd := NewZSliceCmd(ctx, args...) - cmd.SetFirstKeyPos(2) - _ = c(ctx, cmd) - return cmd -} - -// ZDiffStore redis-server version >=6.2.0. -func (c cmdable) ZDiffStore(ctx context.Context, destination string, keys ...string) *IntCmd { - args := make([]interface{}, 0, 3+len(keys)) - args = append(args, "zdiffstore", destination, len(keys)) - for _, key := range keys { - args = append(args, key) - } - cmd := NewIntCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -//------------------------------------------------------------------------------ - -func (c cmdable) PFAdd(ctx context.Context, key string, els ...interface{}) *IntCmd { - args := make([]interface{}, 2, 2+len(els)) - args[0] = "pfadd" - args[1] = key - args = appendArgs(args, els) - cmd := NewIntCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) PFCount(ctx context.Context, keys ...string) *IntCmd { - args := make([]interface{}, 1+len(keys)) - args[0] = "pfcount" - for i, key := range keys { - args[1+i] = key - } - cmd := NewIntCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) PFMerge(ctx context.Context, dest string, keys ...string) *StatusCmd { - args := make([]interface{}, 2+len(keys)) - args[0] = "pfmerge" - args[1] = dest - for i, key := range keys { - args[2+i] = key - } - cmd := NewStatusCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -//------------------------------------------------------------------------------ - -func (c cmdable) BgRewriteAOF(ctx context.Context) *StatusCmd { - cmd := NewStatusCmd(ctx, "bgrewriteaof") - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) BgSave(ctx context.Context) *StatusCmd { - cmd := NewStatusCmd(ctx, "bgsave") - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) ClientKill(ctx context.Context, ipPort string) *StatusCmd { - cmd := NewStatusCmd(ctx, "client", "kill", ipPort) - _ = c(ctx, cmd) - return cmd -} - -// ClientKillByFilter is new style syntax, while the ClientKill is old -// -// CLIENT KILL